//
// Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// SPARC Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding, vm name );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.


// ----------------------------
// Integer/Long Registers
// ----------------------------

// Need to expose the hi/lo aspect of 64-bit registers
// This register set is used for both the 64-bit build and
// the 32-bit build with 1-register longs.
// Global Registers 0-7.  Encodings 0-7 name the low words; 128-135 the
// matching high words of the 64-bit registers.
reg_def R_G0H( NS,  NS, Op_RegI,128, G0->as_VMReg()->next());
reg_def R_G0 ( NS,  NS, Op_RegI,  0, G0->as_VMReg());
reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next());
reg_def R_G1 (SOC, SOC, Op_RegI,  1, G1->as_VMReg());
reg_def R_G2H( NS,  NS, Op_RegI,130, G2->as_VMReg()->next());
reg_def R_G2 ( NS,  NS, Op_RegI,  2, G2->as_VMReg());
reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next());
reg_def R_G3 (SOC, SOC, Op_RegI,  3, G3->as_VMReg());
reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next());
reg_def R_G4 (SOC, SOC, Op_RegI,  4, G4->as_VMReg());
reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next());
reg_def R_G5 (SOC, SOC, Op_RegI,  5, G5->as_VMReg());
reg_def R_G6H( NS,  NS, Op_RegI,134, G6->as_VMReg()->next());
reg_def R_G6 ( NS,  NS, Op_RegI,  6, G6->as_VMReg());
reg_def R_G7H( NS,  NS, Op_RegI,135, G7->as_VMReg()->next());
reg_def R_G7 ( NS,  NS, Op_RegI,  7, G7->as_VMReg());

// Output Registers 0-7.  O6 is the stack pointer (R_SP) and is never
// allocated, hence NS; the other out registers are caller-saved.
reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next());
reg_def R_O0 (SOC, SOC, Op_RegI,  8, O0->as_VMReg());
reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next());
reg_def R_O1 (SOC, SOC, Op_RegI,  9, O1->as_VMReg());
reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next());
reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg());
reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next());
reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg());
reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next());
reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg());
reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next());
reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg());
reg_def R_SPH( NS,  NS, Op_RegI,142, SP->as_VMReg()->next());
reg_def R_SP ( NS,  NS, Op_RegI, 14, SP->as_VMReg());
reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next());
reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg());
// Local Registers 0-7.  Preserved across calls by the register window,
// so the allocator treats them as No-Save.
reg_def R_L0H( NS,  NS, Op_RegI,144, L0->as_VMReg()->next());
reg_def R_L0 ( NS,  NS, Op_RegI, 16, L0->as_VMReg());
reg_def R_L1H( NS,  NS, Op_RegI,145, L1->as_VMReg()->next());
reg_def R_L1 ( NS,  NS, Op_RegI, 17, L1->as_VMReg());
reg_def R_L2H( NS,  NS, Op_RegI,146, L2->as_VMReg()->next());
reg_def R_L2 ( NS,  NS, Op_RegI, 18, L2->as_VMReg());
reg_def R_L3H( NS,  NS, Op_RegI,147, L3->as_VMReg()->next());
reg_def R_L3 ( NS,  NS, Op_RegI, 19, L3->as_VMReg());
reg_def R_L4H( NS,  NS, Op_RegI,148, L4->as_VMReg()->next());
reg_def R_L4 ( NS,  NS, Op_RegI, 20, L4->as_VMReg());
reg_def R_L5H( NS,  NS, Op_RegI,149, L5->as_VMReg()->next());
reg_def R_L5 ( NS,  NS, Op_RegI, 21, L5->as_VMReg());
reg_def R_L6H( NS,  NS, Op_RegI,150, L6->as_VMReg()->next());
reg_def R_L6 ( NS,  NS, Op_RegI, 22, L6->as_VMReg());
reg_def R_L7H( NS,  NS, Op_RegI,151, L7->as_VMReg()->next());
reg_def R_L7 ( NS,  NS, Op_RegI, 23, L7->as_VMReg());

// Input Registers 0-7.  I6 is the frame pointer (R_FP).
reg_def R_I0H( NS,  NS, Op_RegI,152, I0->as_VMReg()->next());
reg_def R_I0 ( NS,  NS, Op_RegI, 24, I0->as_VMReg());
reg_def R_I1H( NS,  NS, Op_RegI,153, I1->as_VMReg()->next());
reg_def R_I1 ( NS,  NS, Op_RegI, 25, I1->as_VMReg());
reg_def R_I2H( NS,  NS, Op_RegI,154, I2->as_VMReg()->next());
reg_def R_I2 ( NS,  NS, Op_RegI, 26, I2->as_VMReg());
reg_def R_I3H( NS,  NS, Op_RegI,155, I3->as_VMReg()->next());
reg_def R_I3 ( NS,  NS, Op_RegI, 27, I3->as_VMReg());
reg_def R_I4H( NS,  NS, Op_RegI,156, I4->as_VMReg()->next());
reg_def R_I4 ( NS,  NS, Op_RegI, 28, I4->as_VMReg());
reg_def R_I5H( NS,  NS, Op_RegI,157, I5->as_VMReg()->next());
reg_def R_I5 ( NS,  NS, Op_RegI, 29, I5->as_VMReg());
reg_def R_FPH( NS,  NS, Op_RegI,158, FP->as_VMReg()->next());
reg_def R_FP ( NS,  NS, Op_RegI, 30, FP->as_VMReg());
reg_def R_I7H( NS,  NS, Op_RegI,159, I7->as_VMReg()->next());
reg_def R_I7 ( NS,  NS, Op_RegI, 31, I7->as_VMReg());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Float Registers
reg_def R_F0 ( SOC, SOC, Op_RegF,  0, F0->as_VMReg());
reg_def R_F1 ( SOC, SOC, Op_RegF,  1, F1->as_VMReg());
reg_def R_F2 ( SOC, SOC, Op_RegF,  2, F2->as_VMReg());
reg_def R_F3 ( SOC, SOC, Op_RegF,  3, F3->as_VMReg());
reg_def R_F4 ( SOC, SOC, Op_RegF,  4, F4->as_VMReg());
reg_def R_F5 ( SOC, SOC, Op_RegF,  5, F5->as_VMReg());
reg_def R_F6 ( SOC, SOC, Op_RegF,  6, F6->as_VMReg());
reg_def R_F7 ( SOC, SOC, Op_RegF,  7, F7->as_VMReg());
reg_def R_F8 ( SOC, SOC, Op_RegF,  8, F8->as_VMReg());
reg_def R_F9 ( SOC, SOC, Op_RegF,  9, F9->as_VMReg());
reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg());

// Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers.  In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even.  Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// These definitions specify the actual bit encodings of the sparc
// double fp register numbers.  FloatRegisterImpl in register_sparc.hpp
// wants 0-63, so we have to convert every time we want to use fp regs
// with the macroassembler, using reg_to_DoubleFloatRegister_object().
// 255 is a flag meaning "don't go here".
// I believe we can't handle callee-save doubles D32 and up until
// the place in the sparc stack crawler that asserts on the 255 is
// fixed up.
reg_def R_D32 (SOC, SOC, Op_RegD,  1, F32->as_VMReg());
reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next());
reg_def R_D34 (SOC, SOC, Op_RegD,  3, F34->as_VMReg());
reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next());
reg_def R_D36 (SOC, SOC, Op_RegD,  5, F36->as_VMReg());
reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next());
reg_def R_D38 (SOC, SOC, Op_RegD,  7, F38->as_VMReg());
reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next());
reg_def R_D40 (SOC, SOC, Op_RegD,  9, F40->as_VMReg());
reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next());
reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg());
reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next());
reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg());
reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next());
reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg());
reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next());
reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg());
reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next());
reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg());
reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next());
reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg());
reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next());
reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg());
reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next());
reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg());
reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next());
reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg());
reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next());
reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg());
reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next());
reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg());
reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg()->next());


// ----------------------------
// Special Registers
// Condition Codes Flag Registers
// I tried to break out ICC and XCC but it's not very pretty.
// Every Sparc instruction which defs/kills one also kills the other.
// Hence every compare instruction which defs one kind of flags ends
// up needing a kill of the other.
reg_def CCR (SOC, SOC,  Op_RegFlags, 0, VMRegImpl::Bad());

reg_def FCC0(SOC, SOC,  Op_RegFlags, 0, VMRegImpl::Bad());
reg_def FCC1(SOC, SOC,  Op_RegFlags, 1, VMRegImpl::Bad());
reg_def FCC2(SOC, SOC,  Op_RegFlags, 2, VMRegImpl::Bad());
reg_def FCC3(SOC, SOC,  Op_RegFlags, 3, VMRegImpl::Bad());

// ----------------------------
// Specify the enum values for the registers.  These enums are only used by the
// OptoReg "class".  We can convert these enum values at will to VMReg when needed
// for visibility to the rest of the vm.  The order of this enum influences the
// register allocator so having the freedom to set this order and not be stuck
// with the order that is natural for the rest of the vm is worth it.
alloc_class chunk0(
    R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H,
    R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H,
    R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H,
    R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H);

// Note that a register is not allocatable unless it is also mentioned
// in a widely-used reg_class below.  Thus, R_G7 and R_G0 are outside i_reg.

alloc_class chunk1(
    // The first registers listed here are those most likely to be used
    // as temporaries.  We move F0..F7 away from the front of the list,
    // to reduce the likelihood of interferences with parameters and
    // return values.  Likewise, we avoid using F0/F1 for parameters,
    // since they are used for return values.
    // This FPU fine-tuning is worth about 1% on the SPEC geomean.
    R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
    R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,
    R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31,
    R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values
    R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,
    R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
    R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,
    R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x);

alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg ( as defined in frame section )
// 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// G0 is not included in integer class since it has special meaning.
reg_class g0_reg(R_G0);

// ----------------------------
// Integer Register Classes
// ----------------------------
// Exclusions from i_reg:
// R_G0: hardwired zero
// R_G2: reserved by HotSpot to the TLS register (invariant within Java)
// R_G6: reserved by Solaris ABI to tools
// R_G7: reserved by Solaris ABI to libthread
// R_O7: Used as a temp in many encodings
reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);

// Class for all integer registers, except the G registers.
// This is used for encodings which use G registers as temps.  The regular
// inputs to such instructions use a "notemp_" prefix, as a hack to ensure
// that the allocator will not put an input into a temp register.
reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);

reg_class g1_regI(R_G1);
reg_class g3_regI(R_G3);
reg_class g4_regI(R_G4);
reg_class o0_regI(R_O0);
reg_class o7_regI(R_O7);

// ----------------------------
// Pointer Register Classes
// ----------------------------
#ifdef _LP64
// 64-bit build means 64-bit pointers means hi/lo pairs
reg_class ptr_reg(            R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
// Lock encodings use G3 and G4 internally
reg_class lock_ptr_reg(       R_G1H,R_G1,                         R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg(         R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP );
// R_L7 is the lowest-priority callee-save (i.e., NS) register
// We use it to save R_G2 across calls out of Java.
reg_class l7_regP(R_L7H,R_L7);

// Other special pointer regs
reg_class g1_regP(R_G1H,R_G1);
reg_class g2_regP(R_G2H,R_G2);
reg_class g3_regP(R_G3H,R_G3);
reg_class g4_regP(R_G4H,R_G4);
reg_class g5_regP(R_G5H,R_G5);
reg_class i0_regP(R_I0H,R_I0);
reg_class o0_regP(R_O0H,R_O0);
reg_class o1_regP(R_O1H,R_O1);
reg_class o2_regP(R_O2H,R_O2);
reg_class o7_regP(R_O7H,R_O7);

#else // _LP64
// 32-bit build means 32-bit pointers means 1 register.
reg_class ptr_reg(     R_G1,     R_G3,R_G4,R_G5,
                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Lock encodings use G3 and G4 internally
reg_class lock_ptr_reg(R_G1,               R_G5,
                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg(  R_G1,R_G2,R_G3,R_G4,R_G5,
                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
// R_L7 is the lowest-priority callee-save (i.e., NS) register
// We use it to save R_G2 across calls out of Java.
reg_class l7_regP(R_L7);

// Other special pointer regs
reg_class g1_regP(R_G1);
reg_class g2_regP(R_G2);
reg_class g3_regP(R_G3);
reg_class g4_regP(R_G4);
reg_class g5_regP(R_G5);
reg_class i0_regP(R_I0);
reg_class o0_regP(R_O0);
reg_class o1_regP(R_O1);
reg_class o2_regP(R_O2);
reg_class o7_regP(R_O7);
#endif // _LP64


// ----------------------------
// Long Register Classes
// ----------------------------
// Longs in 1 register.  Aligned adjacent hi/lo pairs.
// Note:  O7 is never in this class; it is sometimes used as an encoding temp.
reg_class long_reg(             R_G1H,R_G1,             R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
                   ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
#ifdef _LP64
// 64-bit, longs in 1 register: use all 64-bit integer registers
// 32-bit, longs in 1 register: cannot use I's and L's.  Restrict to O's and G's.
                   ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
                   ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
#endif // _LP64
                   );

reg_class g1_regL(R_G1H,R_G1);
reg_class g3_regL(R_G3H,R_G3);
reg_class o2_regL(R_O2H,R_O2);
reg_class o7_regL(R_O7H,R_O7);

// ----------------------------
// Special Class for Condition Code Flags Register
reg_class int_flags(CCR);
reg_class float_flags(FCC0,FCC1,FCC2,FCC3);
reg_class float_flag0(FCC0);


// ----------------------------
// Float Point Register Classes
// ----------------------------
// Skip F30/F31, they are reserved for mem-mem copies
reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);

// Paired floating point registers--they show up in the same order as the floats,
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
                   R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,
                   /* Use extra V9 double registers; this AD file does not support V8 */
                   R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
                   R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x
                   );

// Paired floating point registers--they show up in the same order as the floats,
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
// This class is usable for mis-aligned loads as happen in I2C adapters.
reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
                       R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
%}

//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
//        int_def  <name>         ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
//        #define  <name>   (<expression>)
//        // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
definitions %{
// The default cost (of an ALU instruction).
  int_def DEFAULT_COST      (    100,     100);
  int_def HUGE_COST         (1000000, 1000000);

// Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

// Branches are even more expensive.
451 int_def BRANCH_COST ( 300, DEFAULT_COST * 3); 452 int_def CALL_COST ( 300, DEFAULT_COST * 3); 453 %} 454 455 456 //----------SOURCE BLOCK------------------------------------------------------- 457 // This is a block of C++ code which provides values, functions, and 458 // definitions necessary in the rest of the architecture description 459 source_hpp %{ 460 // Must be visible to the DFA in dfa_sparc.cpp 461 extern bool can_branch_register( Node *bol, Node *cmp ); 462 463 extern bool use_block_zeroing(Node* count); 464 465 // Macros to extract hi & lo halves from a long pair. 466 // G0 is not part of any long pair, so assert on that. 467 // Prevents accidentally using G1 instead of G0. 468 #define LONG_HI_REG(x) (x) 469 #define LONG_LO_REG(x) (x) 470 471 %} 472 473 source %{ 474 #define __ _masm. 475 476 // tertiary op of a LoadP or StoreP encoding 477 #define REGP_OP true 478 479 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding); 480 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding); 481 static Register reg_to_register_object(int register_encoding); 482 483 // Used by the DFA in dfa_sparc.cpp. 484 // Check for being able to use a V9 branch-on-register. Requires a 485 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign- 486 // extended. Doesn't work following an integer ADD, for example, because of 487 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On 488 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and 489 // replace them with zero, which could become sign-extension in a different OS 490 // release. There's no obvious reason why an interrupt will ever fill these 491 // bits with non-zero junk (the registers are reloaded with standard LD 492 // instructions which either zero-fill or sign-fill). 
493 bool can_branch_register( Node *bol, Node *cmp ) { 494 if( !BranchOnRegister ) return false; 495 #ifdef _LP64 496 if( cmp->Opcode() == Op_CmpP ) 497 return true; // No problems with pointer compares 498 #endif 499 if( cmp->Opcode() == Op_CmpL ) 500 return true; // No problems with long compares 501 502 if( !SparcV9RegsHiBitsZero ) return false; 503 if( bol->as_Bool()->_test._test != BoolTest::ne && 504 bol->as_Bool()->_test._test != BoolTest::eq ) 505 return false; 506 507 // Check for comparing against a 'safe' value. Any operation which 508 // clears out the high word is safe. Thus, loads and certain shifts 509 // are safe, as are non-negative constants. Any operation which 510 // preserves zero bits in the high word is safe as long as each of its 511 // inputs are safe. Thus, phis and bitwise booleans are safe if their 512 // inputs are safe. At present, the only important case to recognize 513 // seems to be loads. Constants should fold away, and shifts & 514 // logicals can use the 'cc' forms. 515 Node *x = cmp->in(1); 516 if( x->is_Load() ) return true; 517 if( x->is_Phi() ) { 518 for( uint i = 1; i < x->req(); i++ ) 519 if( !x->in(i)->is_Load() ) 520 return false; 521 return true; 522 } 523 return false; 524 } 525 526 bool use_block_zeroing(Node* count) { 527 // Use BIS for zeroing if count is not constant 528 // or it is >= BlockZeroingLowLimit. 529 return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit); 530 } 531 532 // **************************************************************************** 533 534 // REQUIRED FUNCTIONALITY 535 536 // !!!!! Special hack to get all type of calls to specify the byte offset 537 // from the start of the call to the point where the return address 538 // will point. 539 // The "return address" is the address of the call instruction, plus 8. 
540 541 int MachCallStaticJavaNode::ret_addr_offset() { 542 int offset = NativeCall::instruction_size; // call; delay slot 543 if (_method_handle_invoke) 544 offset += 4; // restore SP 545 return offset; 546 } 547 548 int MachCallDynamicJavaNode::ret_addr_offset() { 549 int vtable_index = this->_vtable_index; 550 if (vtable_index < 0) { 551 // must be invalid_vtable_index, not nonvirtual_vtable_index 552 assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value"); 553 return (NativeMovConstReg::instruction_size + 554 NativeCall::instruction_size); // sethi; setlo; call; delay slot 555 } else { 556 assert(!UseInlineCaches, "expect vtable calls only if not using ICs"); 557 int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); 558 int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); 559 int klass_load_size; 560 if (UseCompressedClassPointers) { 561 assert(Universe::heap() != NULL, "java heap should be initialized"); 562 klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord; 563 } else { 564 klass_load_size = 1*BytesPerInstWord; 565 } 566 if (Assembler::is_simm13(v_off)) { 567 return klass_load_size + 568 (2*BytesPerInstWord + // ld_ptr, ld_ptr 569 NativeCall::instruction_size); // call; delay slot 570 } else { 571 return klass_load_size + 572 (4*BytesPerInstWord + // set_hi, set, ld_ptr, ld_ptr 573 NativeCall::instruction_size); // call; delay slot 574 } 575 } 576 } 577 578 int MachCallRuntimeNode::ret_addr_offset() { 579 #ifdef _LP64 580 if (MacroAssembler::is_far_target(entry_point())) { 581 return NativeFarCall::instruction_size; 582 } else { 583 return NativeCall::instruction_size; 584 } 585 #else 586 return NativeCall::instruction_size; // call; delay slot 587 #endif 588 } 589 590 // Indicate if the safepoint node needs the polling page as an input. 591 // Since Sparc does not have absolute addressing, it does. 
592 bool SafePointNode::needs_polling_address_input() { 593 return true; 594 } 595 596 // emit an interrupt that is caught by the debugger (for debugging compiler) 597 void emit_break(CodeBuffer &cbuf) { 598 MacroAssembler _masm(&cbuf); 599 __ breakpoint_trap(); 600 } 601 602 #ifndef PRODUCT 603 void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const { 604 st->print("TA"); 605 } 606 #endif 607 608 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 609 emit_break(cbuf); 610 } 611 612 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const { 613 return MachNode::size(ra_); 614 } 615 616 // Traceable jump 617 void emit_jmpl(CodeBuffer &cbuf, int jump_target) { 618 MacroAssembler _masm(&cbuf); 619 Register rdest = reg_to_register_object(jump_target); 620 __ JMP(rdest, 0); 621 __ delayed()->nop(); 622 } 623 624 // Traceable jump and set exception pc 625 void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) { 626 MacroAssembler _masm(&cbuf); 627 Register rdest = reg_to_register_object(jump_target); 628 __ JMP(rdest, 0); 629 __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc ); 630 } 631 632 void emit_nop(CodeBuffer &cbuf) { 633 MacroAssembler _masm(&cbuf); 634 __ nop(); 635 } 636 637 void emit_illtrap(CodeBuffer &cbuf) { 638 MacroAssembler _masm(&cbuf); 639 __ illtrap(0); 640 } 641 642 643 intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) { 644 assert(n->rule() != loadUB_rule, ""); 645 646 intptr_t offset = 0; 647 const TypePtr *adr_type = TYPE_PTR_SENTINAL; // Check for base==RegI, disp==immP 648 const Node* addr = n->get_base_and_disp(offset, adr_type); 649 assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP"); 650 assert(addr != NULL && addr != (Node*)-1, "invalid addr"); 651 assert(addr->bottom_type()->isa_oopptr() == atype, ""); 652 atype = atype->add_offset(offset); 653 assert(disp32 == offset, "wrong disp32"); 
654 return atype->_offset; 655 } 656 657 658 intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) { 659 assert(n->rule() != loadUB_rule, ""); 660 661 intptr_t offset = 0; 662 Node* addr = n->in(2); 663 assert(addr->bottom_type()->isa_oopptr() == atype, ""); 664 if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) { 665 Node* a = addr->in(2/*AddPNode::Address*/); 666 Node* o = addr->in(3/*AddPNode::Offset*/); 667 offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot; 668 atype = a->bottom_type()->is_ptr()->add_offset(offset); 669 assert(atype->isa_oop_ptr(), "still an oop"); 670 } 671 offset = atype->is_ptr()->_offset; 672 if (offset != Type::OffsetBot) offset += disp32; 673 return offset; 674 } 675 676 static inline jdouble replicate_immI(int con, int count, int width) { 677 // Load a constant replicated "count" times with width "width" 678 assert(count*width == 8 && width <= 4, "sanity"); 679 int bit_width = width * 8; 680 jlong val = con; 681 val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits 682 for (int i = 0; i < count - 1; i++) { 683 val |= (val << bit_width); 684 } 685 jdouble dval = *((jdouble*) &val); // coerce to double type 686 return dval; 687 } 688 689 static inline jdouble replicate_immF(float con) { 690 // Replicate float con 2 times and pack into vector. 
691 int val = *((int*)&con); 692 jlong lval = val; 693 lval = (lval << 32) | (lval & 0xFFFFFFFFl); 694 jdouble dval = *((jdouble*) &lval); // coerce to double type 695 return dval; 696 } 697 698 // Standard Sparc opcode form2 field breakdown 699 static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) { 700 f0 &= (1<<19)-1; // Mask displacement to 19 bits 701 int op = (f30 << 30) | 702 (f29 << 29) | 703 (f25 << 25) | 704 (f22 << 22) | 705 (f20 << 20) | 706 (f19 << 19) | 707 (f0 << 0); 708 cbuf.insts()->emit_int32(op); 709 } 710 711 // Standard Sparc opcode form2 field breakdown 712 static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) { 713 f0 >>= 10; // Drop 10 bits 714 f0 &= (1<<22)-1; // Mask displacement to 22 bits 715 int op = (f30 << 30) | 716 (f25 << 25) | 717 (f22 << 22) | 718 (f0 << 0); 719 cbuf.insts()->emit_int32(op); 720 } 721 722 // Standard Sparc opcode form3 field breakdown 723 static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) { 724 int op = (f30 << 30) | 725 (f25 << 25) | 726 (f19 << 19) | 727 (f14 << 14) | 728 (f5 << 5) | 729 (f0 << 0); 730 cbuf.insts()->emit_int32(op); 731 } 732 733 // Standard Sparc opcode form3 field breakdown 734 static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) { 735 simm13 &= (1<<13)-1; // Mask to 13 bits 736 int op = (f30 << 30) | 737 (f25 << 25) | 738 (f19 << 19) | 739 (f14 << 14) | 740 (1 << 13) | // bit to indicate immediate-mode 741 (simm13<<0); 742 cbuf.insts()->emit_int32(op); 743 } 744 745 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) { 746 simm10 &= (1<<10)-1; // Mask to 10 bits 747 emit3_simm13(cbuf,f30,f25,f19,f14,simm10); 748 } 749 750 #ifdef ASSERT 751 // Helper function for VerifyOops in emit_form3_mem_reg 752 void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) { 753 
  warning("VerifyOops encountered unexpected instruction:");
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
#endif


// Emit a SPARC format-3 memory access (load or store, reg+reg or
// reg+simm13 addressing).  "primary" is the op3 field; "tertiary" is
// either -1 or REGP_OP to flag a pointer-sized access.  In ASSERT
// builds with +VerifyOops, additionally classifies the access and emits
// verify_oop checks around the instruction.
void emit_form3_mem_reg(CodeBuffer &cbuf, PhaseRegAlloc* ra, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation.  +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation.  It can also serve as a cross-check
  // on the validity of oop maps, when used toegether with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not.  (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base  = false;
  bool is_verified_oop_load  = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::ldub_op3: ld_op = Op_LoadUB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      // Pointer-sized access: refine the classification and decide
      // whether the loaded/stored value itself is a verifiable oop.
      if (st_op == Op_StoreI)     st_op = Op_StoreP;
      else if (ld_op == Op_LoadI) ld_op = Op_LoadP;
      else                        ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI     && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchRead  && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadVector && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op)    && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    // If the address is a bare base register (no index), try to prove
    // that base is a true oop so we can emit a verify_oop of it.
    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr();  // %%% oopptr?
        if (atype != NULL) {
          // Cross-check the two independent ways of computing the offset.
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            // Re-run both for easy inspection under a debugger before the assert fires.
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              // The load would clobber the base register before we can verify
              // it; redirect the load through O7 and copy back afterwards.
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                        || offset == oopDesc::mark_offset_in_bytes())) {
            // loading the mark should not be allowed either, but
            // we don't check this since it conflicts with InlineObjectHash
            // usage of LoadINode to get the mark. We could keep the
            // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  // Assemble the format-3 instruction word: op=11 (ldst), rd, op3, rs1.
  uint instr;
  instr = (Assembler::ldst_op << 30)
        | (dst_enc  << 25)
        | (primary  << 19)
        | (src1_enc << 14);

  uint index = src2_enc;
  int disp = disp32;

  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc) {
    disp += STACK_BIAS;
    // Quick fix for JDK-8029668: check that stack offset fits, bailout if not
    if (!Assembler::is_simm13(disp)) {
      ra->C->record_method_not_compilable("unable to handle large constant offsets");
      return;
    }
  }

  // We should have a compiler bailout here rather than a guarantee.
  // Better yet would be some mechanism to handle variable-size matches correctly.
  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000; // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  {
    // Emit the verify_oop checks decided above, after the access itself.
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      // Copy the value loaded into O7 back to the original destination.
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}

// Emit a relocated call to "entry_point".  When preserve_g2 is set, G2 is
// saved to L7 in the delay slot and restored after the call.
// (Continues past this chunk boundary.)
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.
#ifdef ASSERT
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rtype);

  // Preserve G2 across the call by parking it in L7 (delay slot) and
  // restoring it afterwards; otherwise just fill the delay slot with a nop.
  if (preserve_g2)   __ delayed()->mov(G2, L7);
  else __ delayed()->nop();

  if (preserve_g2) __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
    // this is also a native call, so smash the first 7 stack locations,
    // and the various registers

    // Note:  [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
    // while [SP+0x44..0x58] are the argument dump slots.
    __ set((intptr_t)0xbaadf00d, G1);
    __ mov(G1, G5);
    __ sllx(G1, 32, G1);
    __ or3(G1, G5, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, 0x40);
    __ stx(G1, SP, 0x48);
    __ stx(G1, SP, 0x50);
    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
  }
#endif /*ASSERT*/
}

//=============================================================================
// REQUIRED FUNCTIONALITY for encoding
// Intentionally empty on SPARC; the ADL encoding framework requires them.
void emit_lo(CodeBuffer &cbuf, int val) {  }
void emit_hi(CodeBuffer &cbuf, int val) {  }


//=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();

// Choose the (negative) offset of the constant-table base within the
// consts section so that load displacements from it fit in simm13.
int Compile::ConstantTable::calculate_table_base_offset() const {
  if (UseRDPCForConstantTableBase) {
    // The table base offset might be less but then it fits into
    // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
    return Assembler::min_simm13();
  } else {
    // Aim the base at the middle of the table so both halves are reachable.
    int offset = -(size() / 2);
    if (!Assembler::is_simm13(offset)) {
      offset = Assembler::min_simm13();
    }
    return offset;
  }
}

// Materialize the constant-table base address into the node's output
// register, either via RDPC (+ optional adjustment) or via a relocated SET.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section.  This
    // assert checks for that.  The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int insts_offset = __ offset();

    // Layout:
    //
    // |----------- consts section ------------|----------- insts section -----------...
    // |------ constant table -----|- padding -|------------------x----
    //                                                            \ current PC (RDPC instruction)
    // |<------------- consts_size ----------->|<- insts_offset ->|
    //                                         \ table base
    // The table base offset is later added to the load displacement
    // so it has to be negative.
    int table_base_offset = -(consts_size + insts_offset);
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (Assembler::is_simm13(table_base_offset)) {
      constant_table.set_table_base_offset(table_base_offset);
      disp = 0;
    } else {
      // Otherwise we set the constant table base offset to the
      // maximum negative displacement of load instructions to keep
      // the disp as small as possible:
      //
      // |<------------- consts_size ----------->|<- insts_offset ->|
      // |<--------- min_simm13 --------->|<-------- disp --------->|
      //                                  \ table base
      table_base_offset = Assembler::min_simm13();
      constant_table.set_table_base_offset(table_base_offset);
      disp = (consts_size + insts_offset) + table_base_offset;
    }

    __ rdpc(r);

    if (disp != 0) {
      // Adjust the RDPC result down to the chosen table base.
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}

uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
  if (UseRDPCForConstantTableBase) {
    // This is really the worst case but generally it's only 1 instruction.
    return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
  } else {
    return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
  }
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  if (UseRDPCForConstantTableBase) {
    st->print("RDPC %s\t! constant table base", reg);
  } else {
    st->print("SET &constanttable,%s\t! constant table base", reg);
  }
}
#endif


//=============================================================================

#ifndef PRODUCT
// Pretty-print the method prologue (nops, thread verify, stack bang, SAVE).
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  if( VerifyThread ) {
    st->print_cr("Verify_Thread"); st->print("\t");
  }

  size_t framesize = C->frame_slots() << LogBytesPerInt;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    st->print_cr("! stack bang"); st->print("\t");
  }

  if (Assembler::is_simm13(-framesize)) {
    st->print ("SAVE R_SP,-%d,R_SP",framesize);
  } else {
    st->print_cr("SETHI R_SP,hi%%(-%d),R_G3",framesize); st->print("\t");
    st->print_cr("ADD R_G3,lo%%(-%d),R_G3",framesize); st->print("\t");
    st->print ("SAVE R_SP,R_G3,R_SP");
  }

}
#endif

// Emit the method prologue: optional nops, thread verification, stack
// bang, and the register-window SAVE that allocates the frame.
// (Continues past this chunk boundary.)
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  __ verify_thread();

  size_t framesize = C->frame_slots() << LogBytesPerInt;
  assert(framesize >= 16*wordSize, "must have room for reg. save area");
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.
  // But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  // Allocate the frame; use G3 as scratch when -framesize exceeds simm13.
  if (Assembler::is_simm13(-framesize)) {
    __ save(SP, -framesize, SP);
  } else {
    __ sethi(-framesize & ~0x3ff, G3);
    __ add(G3, -framesize & 0x3ff, G3);
    __ save(SP, G3, SP);
  }
  C->set_frame_complete( __ offset() );

  if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

int MachPrologNode::reloc() const {
  return 10; // a large enough number
}

//=============================================================================
#ifndef PRODUCT
// Pretty-print the method epilogue (safepoint poll, RET, RESTORE).
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  if( do_polling() && ra_->C->is_method_compilation() ) {
    st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
    st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
#else
    st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
#endif
  }

  if( do_polling() )
    st->print("RET\n\t");

  st->print("RESTORE");
}
#endif

// Emit the method epilogue: optional return-poll of the polling page,
// then RET with RESTORE in the delay slot (or a bare RESTORE).
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  __ verify_thread();

  // If this does safepoint polling, then do it here
  if( do_polling() && ra_->C->is_method_compilation() ) {
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, L0);
    __ relocate(relocInfo::poll_return_type);
    __ ld_ptr( L0, 0, G0 );
  }

  // If this is a return, then stuff the restore in the delay slot
  if( do_polling() ) {
    __ ret();
    __ delayed()->restore();
  } else {
    __ restore();
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// Byte offset of the poll instruction within the epilogue (after the
// sethi sequence that loads the polling-page address).
int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}

//=============================================================================

// Figure out which register class each belongs in: rc_int, rc_float, rc_stack
enum RC { rc_bad, rc_int, rc_float, rc_stack };
static enum RC rc_class( OptoReg::Name reg ) {
  if( !OptoReg::is_valid(reg)  ) return rc_bad;
  if (OptoReg::is_stack(reg)) return rc_stack;
  VMReg r = OptoReg::as_VMReg(reg);
  if (r->is_Register()) return rc_int;
  assert(r->is_FloatRegister(), "must be");
  return rc_float;
}

// Emit (or format, in debug printing mode) one spill load/store between a
// register and a stack slot at [R_SP + offset]; returns size+4 (one instr).
static int impl_helper(const MachNode* mach, CodeBuffer* cbuf, PhaseRegAlloc* ra, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
  if (cbuf) {
    emit_form3_mem_reg(*cbuf, ra, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
  }
#ifndef PRODUCT
  else if (!do_size) {
    if (size != 0) st->print("\n\t");
    if (is_load) st->print("%s [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
    else st->print("%s R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
  }
#endif
  return size+4;
}

// Emit (or format) one register-to-register move using a form3 arith
// opcode (op1/op2 select the operation); returns size+4 (one instr).
static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] );
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    st->print("%s R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst));
  }
#endif
  return size+4;
}

// Generate spill code for a register-allocator copy; handles int/float/
// stack source and destination combinations.  (Body continues past the
// end of this chunk.)
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
                                        PhaseRegAlloc *ra_,
                                        bool do_size,
                                        outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
1331 int size = 0; 1332 1333 if( src_first == dst_first && src_second == dst_second ) 1334 return size; // Self copy, no move 1335 1336 // -------------------------------------- 1337 // Check for mem-mem move. Load into unused float registers and fall into 1338 // the float-store case. 1339 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) { 1340 int offset = ra_->reg2offset(src_first); 1341 // Further check for aligned-adjacent pair, so we can use a double load 1342 if( (src_first&1)==0 && src_first+1 == src_second ) { 1343 src_second = OptoReg::Name(R_F31_num); 1344 src_second_rc = rc_float; 1345 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st); 1346 } else { 1347 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st); 1348 } 1349 src_first = OptoReg::Name(R_F30_num); 1350 src_first_rc = rc_float; 1351 } 1352 1353 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { 1354 int offset = ra_->reg2offset(src_second); 1355 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st); 1356 src_second = OptoReg::Name(R_F31_num); 1357 src_second_rc = rc_float; 1358 } 1359 1360 // -------------------------------------- 1361 // Check for float->int copy; requires a trip through memory 1362 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) { 1363 int offset = frame::register_save_words*wordSize; 1364 if (cbuf) { 1365 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 ); 1366 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1367 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1368 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 ); 1369 } 1370 #ifndef PRODUCT 1371 else if (!do_size) { 1372 if (size != 0) st->print("\n\t"); 1373 st->print( "SUB 
R_SP,16,R_SP\n"); 1374 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1375 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1376 st->print("\tADD R_SP,16,R_SP\n"); 1377 } 1378 #endif 1379 size += 16; 1380 } 1381 1382 // Check for float->int copy on T4 1383 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) { 1384 // Further check for aligned-adjacent pair, so we can use a double move 1385 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1386 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st); 1387 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st); 1388 } 1389 // Check for int->float copy on T4 1390 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) { 1391 // Further check for aligned-adjacent pair, so we can use a double move 1392 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1393 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st); 1394 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st); 1395 } 1396 1397 // -------------------------------------- 1398 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations. 1399 // In such cases, I have to do the big-endian swap. For aligned targets, the 1400 // hardware does the flop for me. Doubles are always aligned, so no problem 1401 // there. Misaligned sources only come from native-long-returns (handled 1402 // special below). 
1403 #ifndef _LP64 1404 if( src_first_rc == rc_int && // source is already big-endian 1405 src_second_rc != rc_bad && // 64-bit move 1406 ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst 1407 assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" ); 1408 // Do the big-endian flop. 1409 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ; 1410 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc; 1411 } 1412 #endif 1413 1414 // -------------------------------------- 1415 // Check for integer reg-reg copy 1416 if( src_first_rc == rc_int && dst_first_rc == rc_int ) { 1417 #ifndef _LP64 1418 if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case 1419 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1420 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1421 // operand contains the least significant word of the 64-bit value and vice versa. 1422 OptoReg::Name tmp = OptoReg::Name(R_O7_num); 1423 assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" ); 1424 // Shift O0 left in-place, zero-extend O1, then OR them into the dst 1425 if( cbuf ) { 1426 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 ); 1427 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 ); 1428 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] ); 1429 #ifndef PRODUCT 1430 } else if( !do_size ) { 1431 if( size != 0 ) st->print("\n\t"); 1432 st->print("SLLX R_%s,32,R_%s\t! 
Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp)); 1433 st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second)); 1434 st->print("OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first)); 1435 #endif 1436 } 1437 return size+12; 1438 } 1439 else if( dst_first == R_I0_num && dst_second == R_I1_num ) { 1440 // returning a long value in I0/I1 1441 // a SpillCopy must be able to target a return instruction's reg_class 1442 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1443 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1444 // operand contains the least significant word of the 64-bit value and vice versa. 1445 OptoReg::Name tdest = dst_first; 1446 1447 if (src_first == dst_first) { 1448 tdest = OptoReg::Name(R_O7_num); 1449 size += 4; 1450 } 1451 1452 if( cbuf ) { 1453 assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg"); 1454 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1 1455 // ShrL_reg_imm6 1456 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 ); 1457 // ShrR_reg_imm6 src, 0, dst 1458 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 ); 1459 if (tdest != dst_first) { 1460 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] ); 1461 } 1462 } 1463 #ifndef PRODUCT 1464 else if( !do_size ) { 1465 if( size != 0 ) st->print("\n\t"); // %%%%% !!!!! 1466 st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest)); 1467 st->print("SRL R_%s, 0,R_%s\t! 
Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second)); 1468 if (tdest != dst_first) { 1469 st->print("MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first)); 1470 } 1471 } 1472 #endif // PRODUCT 1473 return size+8; 1474 } 1475 #endif // !_LP64 1476 // Else normal reg-reg copy 1477 assert( src_second != dst_first, "smashed second before evacuating it" ); 1478 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st); 1479 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" ); 1480 // This moves an aligned adjacent pair. 1481 // See if we are done. 1482 if( src_first+1 == src_second && dst_first+1 == dst_second ) 1483 return size; 1484 } 1485 1486 // Check for integer store 1487 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) { 1488 int offset = ra_->reg2offset(dst_first); 1489 // Further check for aligned-adjacent pair, so we can use a double store 1490 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1491 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st); 1492 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st); 1493 } 1494 1495 // Check for integer load 1496 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) { 1497 int offset = ra_->reg2offset(src_first); 1498 // Further check for aligned-adjacent pair, so we can use a double load 1499 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1500 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st); 1501 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1502 } 1503 1504 // Check for float reg-reg copy 1505 if( src_first_rc == rc_float && dst_first_rc == rc_float ) { 1506 // Further check 
for aligned-adjacent pair, so we can use a double move 1507 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1508 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st); 1509 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st); 1510 } 1511 1512 // Check for float store 1513 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) { 1514 int offset = ra_->reg2offset(dst_first); 1515 // Further check for aligned-adjacent pair, so we can use a double store 1516 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1517 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st); 1518 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1519 } 1520 1521 // Check for float load 1522 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) { 1523 int offset = ra_->reg2offset(src_first); 1524 // Further check for aligned-adjacent pair, so we can use a double load 1525 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1526 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st); 1527 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st); 1528 } 1529 1530 // -------------------------------------------------------------------- 1531 // Check for hi bits still needing moving. Only happens for misaligned 1532 // arguments to native calls. 
1533 if( src_second == dst_second ) 1534 return size; // Self copy; no move 1535 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" ); 1536 1537 #ifndef _LP64 1538 // In the LP64 build, all registers can be moved as aligned/adjacent 1539 // pairs, so there's never any need to move the high bits separately. 1540 // The 32-bit builds have to deal with the 32-bit ABI which can force 1541 // all sorts of silly alignment problems. 1542 1543 // Check for integer reg-reg copy. Hi bits are stuck up in the top 1544 // 32-bits of a 64-bit register, but are needed in low bits of another 1545 // register (else it's a hi-bits-to-hi-bits copy which should have 1546 // happened already as part of a 64-bit move) 1547 if( src_second_rc == rc_int && dst_second_rc == rc_int ) { 1548 assert( (src_second&1)==1, "its the evil O0/O1 native return case" ); 1549 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" ); 1550 // Shift src_second down to dst_second's low bits. 1551 if( cbuf ) { 1552 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1553 #ifndef PRODUCT 1554 } else if( !do_size ) { 1555 if( size != 0 ) st->print("\n\t"); 1556 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second)); 1557 #endif 1558 } 1559 return size+4; 1560 } 1561 1562 // Check for high word integer store. Must down-shift the hi bits 1563 // into a temp register, then fall into the case of storing int bits. 1564 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) { 1565 // Shift src_second down to dst_second's low bits. 
1566 if( cbuf ) { 1567 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1568 #ifndef PRODUCT 1569 } else if( !do_size ) { 1570 if( size != 0 ) st->print("\n\t"); 1571 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num)); 1572 #endif 1573 } 1574 size+=4; 1575 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num! 1576 } 1577 1578 // Check for high word integer load 1579 if( dst_second_rc == rc_int && src_second_rc == rc_stack ) 1580 return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st); 1581 1582 // Check for high word integer store 1583 if( src_second_rc == rc_int && dst_second_rc == rc_stack ) 1584 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st); 1585 1586 // Check for high word float store 1587 if( src_second_rc == rc_float && dst_second_rc == rc_stack ) 1588 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st); 1589 1590 #endif // !_LP64 1591 1592 Unimplemented(); 1593 } 1594 1595 #ifndef PRODUCT 1596 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1597 implementation( NULL, ra_, false, st ); 1598 } 1599 #endif 1600 1601 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1602 implementation( &cbuf, ra_, false, NULL ); 1603 } 1604 1605 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const { 1606 return implementation( NULL, ra_, true, NULL ); 1607 } 1608 1609 //============================================================================= 1610 #ifndef PRODUCT 1611 void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const { 1612 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count); 1613 } 1614 #endif 1615 1616 void 
MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
  MacroAssembler _masm(&cbuf);
  // Emit one 4-byte NOP per requested pad slot.
  for(int i = 0; i < _count; i += 1) {
    __ nop();
  }
}

// Size of the pad in bytes: _count NOPs of 4 bytes each.
uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return 4 * _count;
}


//=============================================================================
#ifndef PRODUCT
// Debug listing of a BoxLock: the stack address it materializes and the
// register it lands in.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
}
#endif

// Materialize the (biased) stack address SP+offset into the allocated
// register. When the offset does not fit in a simm13 immediate, build it
// in scratch register O7 first.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm13(offset)) {
    __ add(SP, offset, reg_to_register_object(reg));
  } else {
    __ set(offset, O7);
    __ add(SP, O7, reg_to_register_object(reg));
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  // Measure by emitting into a scratch buffer (size depends on the offset).
  return ra_->C->scratch_emit_size(this);
}

//=============================================================================
#ifndef PRODUCT
// Debug listing of the Unverified Entry Point: the inline-cache klass check.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
#ifdef _LP64
  if (UseCompressedClassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    if (Universe::narrow_klass_base() != 0) {
      st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
      if (Universe::narrow_klass_shift() != 0) {
        st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
      }
      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
      st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
    } else {
      st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
    }
  } else {
    st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64
  st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif // _LP64
}
#endif

// Emit the inline-cache check: load the receiver's klass, compare it with
// the expected klass in the IC register, and trap to the miss handler on
// mismatch.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg = G3;
  assert( G5_ic_reg != temp_reg, "conflicting registers" );

  // Load klass from receiver
  __ load_klass(O0, temp_reg);
  // Compare against expected klass
  __ cmp(temp_reg, G5_ic_reg);
  // Branch to miss code, checks xcc or icc depending
  __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}


//=============================================================================

// Worst-case byte size of the exception-handler stub.
uint size_exception_handler() {
  if (TraceJumps) {
    return (400); // just a guess
  }
  return ( NativeJump::instruction_size ); // sethi;jmp;nop
}

// Worst-case byte size of the deopt-handler stub (one extra save frame).
uint size_deopt_handler() {
  if (TraceJumps) {
    return (400); // just a guess
  }
  return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
}

// Emit exception handler code.
// Returns the offset of the handler within the code buffer, or 0 if the
// stub could not be allocated.
int emit_exception_handler(CodeBuffer& cbuf) {
  Register temp_reg = G3;
  AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed

  int offset = __ offset();

  __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp
  __ delayed()->nop();

  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");

  __ end_a_stub();

  return offset;
}

// Emit the deoptimization handler stub; same return convention as
// emit_exception_handler above.
int emit_deopt_handler(CodeBuffer& cbuf) {
  // Can't use any of the current frame's registers as we may have deopted
  // at a poll and everything (including G3) can be live.
  Register temp_reg = L0;
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed

  int offset = __ offset();
  __ save_frame(0);
  __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp
  __ delayed()->restore();

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");

  __ end_a_stub();
  return offset;

}

// Given a register encoding, produce an Integer Register object
static Register reg_to_register_object(int register_encoding) {
  assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding");
  return as_Register(register_encoding);
}

// Given a register encoding, produce a single-precision Float Register object
static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) {
  assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc
&& F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding"); 1776 return as_SingleFloatRegister(register_encoding); 1777 } 1778 1779 // Given a register encoding, produce a double-precision Float Register object 1780 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) { 1781 assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding"); 1782 assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding"); 1783 return as_DoubleFloatRegister(register_encoding); 1784 } 1785 1786 const bool Matcher::match_rule_supported(int opcode) { 1787 if (!has_match_rule(opcode)) 1788 return false; 1789 1790 switch (opcode) { 1791 case Op_CountLeadingZerosI: 1792 case Op_CountLeadingZerosL: 1793 case Op_CountTrailingZerosI: 1794 case Op_CountTrailingZerosL: 1795 case Op_PopCountI: 1796 case Op_PopCountL: 1797 if (!UsePopCountInstruction) 1798 return false; 1799 case Op_CompareAndSwapL: 1800 #ifdef _LP64 1801 case Op_CompareAndSwapP: 1802 #endif 1803 if (!VM_Version::supports_cx8()) 1804 return false; 1805 break; 1806 } 1807 1808 return true; // Per default match rules are supported. 1809 } 1810 1811 int Matcher::regnum_to_fpu_offset(int regnum) { 1812 return regnum - 32; // The FP registers are in the second chunk 1813 } 1814 1815 #ifdef ASSERT 1816 address last_rethrow = NULL; // debugging aid for Rethrow encoding 1817 #endif 1818 1819 // Vector width in bytes 1820 const int Matcher::vector_width_in_bytes(BasicType bt) { 1821 assert(MaxVectorSize == 8, ""); 1822 return 8; 1823 } 1824 1825 // Vector ideal reg 1826 const int Matcher::vector_ideal_reg(int size) { 1827 assert(MaxVectorSize == 8, ""); 1828 return Op_RegD; 1829 } 1830 1831 const int Matcher::vector_shift_count_ideal_reg(int size) { 1832 fatal("vector shift is not supported"); 1833 return Node::NotAMachineReg; 1834 } 1835 1836 // Limits on vector size (number of elements) loaded into vector. 
// Maximum number of elements of type bt in one vector (vector bytes / element bytes).
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Minimum vector size equals the maximum: only full-width vectors are used.
const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// SPARC doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}

// Current (2013) SPARC platforms need to read original key
// to construct decryption expanded key
const bool Matcher::pass_original_key_for_aes() {
  return true;
}

// USII supports fxtof through the whole range of number, USIII doesn't
const bool Matcher::convL2FSupported(void) {
  return VM_Version::has_fast_fxtof();
}

// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Don't need to adjust the offset.
  // CBCOND takes a 12-bit signed displacement; only usable when enabled.
  return UseCBCond && Assembler::is_simm12(offset);
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Depends on optimizations in MacroAssembler::setx.
  int hi = (int)(value >> 32);
  // Note: ~0 is all-ones, so (value & ~0) is just the low 32 bits of value.
  int lo = (int)(value & ~0);
  // Simple when either half is all-zeros, or the high half is all-ones.
  return (hi == 0) || (hi == -1) || (lo == 0);
}

// No scaling for the parameter the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// CMOVF/CMOVD are expensive on T4 and on SPARC64.
const int Matcher::float_cmove_cost() {
  return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
}

// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// Narrow-oop addressing never folds into a complex address on SPARC.
// Only meaningful in the 64-bit compressed-oops build.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same as above, for compressed class pointers.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif

// No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved. In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif

// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Standard sparc 6 args in registers
  if( reg == R_I0_num ||
      reg == R_I1_num ||
      reg == R_I2_num ||
      reg == R_I3_num ||
      reg == R_I4_num ||
      reg == R_I5_num ) return true;
#ifdef _LP64
  // 64-bit builds can pass 64-bit pointers and longs in
  // the high I registers
  if( reg == R_I0H_num ||
      reg == R_I1H_num ||
      reg == R_I2H_num ||
      reg == R_I3H_num ||
      reg == R_I4H_num ||
      reg == R_I5H_num ) return true;

  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
    return true;
  }

#else
  // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
  // Longs cannot be passed in O regs, because O regs become I regs
  // after a 'save' and I regs get their high bits chopped off on
  // interrupt.
  if( reg == R_G1H_num || reg == R_G1_num ) return true;
  if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
  // A few float args in registers
  if( reg >= R_F0_num && reg <= R_F7_num ) return true;

  return false;
}

// Any register that can carry a Java argument may also hold a spilled one.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use hardware SDIVX instruction when it is
  // faster than code which uses multiply.
  return VM_Version::has_fast_idiv();
}

// Register for DIVI projection of divmodI
// divmodI is not implemented on SPARC; these projections are never asked for.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is preserved in L7 across a MethodHandle invoke.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return L7_REGP_mask();
}

// mathExact intrinsics deliver their result and overflow flags in G1 / icc.
const RegMask Matcher::mathExactI_result_proj_mask() {
  return G1_REGI_mask();
}

const RegMask Matcher::mathExactL_result_proj_mask() {
  return G1_REGL_mask();
}

const RegMask Matcher::mathExactI_flags_proj_mask() {
  return INT_FLAGS_mask();
}


%}


// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.)
2050 #ifdef _LP64 2051 #define immX immL 2052 #define immX13 immL13 2053 #define immX13m7 immL13m7 2054 #define iRegX iRegL 2055 #define g1RegX g1RegL 2056 #else 2057 #define immX immI 2058 #define immX13 immI13 2059 #define immX13m7 immI13m7 2060 #define iRegX iRegI 2061 #define g1RegX g1RegI 2062 #endif 2063 2064 //----------ENCODING BLOCK----------------------------------------------------- 2065 // This block specifies the encoding classes used by the compiler to output 2066 // byte streams. Encoding classes are parameterized macros used by 2067 // Machine Instruction Nodes in order to generate the bit encoding of the 2068 // instruction. Operands specify their base encoding interface with the 2069 // interface keyword. There are currently supported four interfaces, 2070 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an 2071 // operand to generate a function which returns its register number when 2072 // queried. CONST_INTER causes an operand to generate a function which 2073 // returns the value of the constant when queried. MEMORY_INTER causes an 2074 // operand to generate four functions which return the Base Register, the 2075 // Index Register, the Scale Value, and the Offset Value of the operand when 2076 // queried. COND_INTER causes an operand to generate six functions which 2077 // return the encoding code (ie - encoding bits for the instruction) 2078 // associated with each basic boolean condition for a conditional instruction. 2079 // 2080 // Instructions specify two basic values for encoding. Again, a function 2081 // is available to check if the constant displacement is an oop. They use the 2082 // ins_encode keyword to specify their encoding classes (which must be 2083 // a sequence of enc_class names, and their parameters, specified in 2084 // the encoding block), and they use the 2085 // opcode keyword to specify, in order, their primary, secondary, and 2086 // tertiary opcode. 
Only the opcode sections which a particular instruction 2087 // needs for encoding need to be specified. 2088 encode %{ 2089 enc_class enc_untested %{ 2090 #ifdef ASSERT 2091 MacroAssembler _masm(&cbuf); 2092 __ untested("encoding"); 2093 #endif 2094 %} 2095 2096 enc_class form3_mem_reg( memory mem, iRegI dst ) %{ 2097 emit_form3_mem_reg(cbuf, ra_, this, $primary, $tertiary, 2098 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2099 %} 2100 2101 enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{ 2102 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 2103 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2104 %} 2105 2106 enc_class form3_mem_prefetch_read( memory mem ) %{ 2107 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 2108 $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/); 2109 %} 2110 2111 enc_class form3_mem_prefetch_write( memory mem ) %{ 2112 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 2113 $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/); 2114 %} 2115 2116 enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{ 2117 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2118 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2119 guarantee($mem$$index == R_G0_enc, "double index?"); 2120 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc ); 2121 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg ); 2122 emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 ); 2123 emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc ); 2124 %} 2125 2126 enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{ 2127 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2128 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2129 guarantee($mem$$index == R_G0_enc, 
"double index?"); 2130 // Load long with 2 instructions 2131 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 ); 2132 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 ); 2133 %} 2134 2135 //%%% form3_mem_plus_4_reg is a hack--get rid of it 2136 enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{ 2137 guarantee($mem$$disp, "cannot offset a reg-reg operand by 4"); 2138 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg); 2139 %} 2140 2141 enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{ 2142 // Encode a reg-reg copy. If it is useless, then empty encoding. 2143 if( $rs2$$reg != $rd$$reg ) 2144 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg ); 2145 %} 2146 2147 // Target lo half of long 2148 enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{ 2149 // Encode a reg-reg copy. If it is useless, then empty encoding. 2150 if( $rs2$$reg != LONG_LO_REG($rd$$reg) ) 2151 emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg ); 2152 %} 2153 2154 // Source lo half of long 2155 enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{ 2156 // Encode a reg-reg copy. If it is useless, then empty encoding. 2157 if( LONG_LO_REG($rs2$$reg) != $rd$$reg ) 2158 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) ); 2159 %} 2160 2161 // Target hi half of long 2162 enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{ 2163 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 ); 2164 %} 2165 2166 // Source lo half of long, and leave it sign extended. 
2167 enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{ 2168 // Sign extend low half 2169 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 ); 2170 %} 2171 2172 // Source hi half of long, and leave it sign extended. 2173 enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{ 2174 // Shift high half to low half 2175 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 ); 2176 %} 2177 2178 // Source hi half of long 2179 enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{ 2180 // Encode a reg-reg copy. If it is useless, then empty encoding. 2181 if( LONG_HI_REG($rs2$$reg) != $rd$$reg ) 2182 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) ); 2183 %} 2184 2185 enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{ 2186 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg ); 2187 %} 2188 2189 enc_class enc_to_bool( iRegI src, iRegI dst ) %{ 2190 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg ); 2191 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 ); 2192 %} 2193 2194 enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{ 2195 emit3 ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg ); 2196 // clear if nothing else is happening 2197 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 ); 2198 // blt,a,pn done 2199 emit2_19 ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 ); 2200 // mov dst,-1 in delay slot 2201 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 ); 2202 %} 2203 2204 enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{ 2205 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F ); 2206 %} 2207 2208 enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd 
) %{ 2209 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 ); 2210 %} 2211 2212 enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{ 2213 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg ); 2214 %} 2215 2216 enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{ 2217 emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant ); 2218 %} 2219 2220 enc_class move_return_pc_to_o1() %{ 2221 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset ); 2222 %} 2223 2224 #ifdef _LP64 2225 /* %%% merge with enc_to_bool */ 2226 enc_class enc_convP2B( iRegI dst, iRegP src ) %{ 2227 MacroAssembler _masm(&cbuf); 2228 2229 Register src_reg = reg_to_register_object($src$$reg); 2230 Register dst_reg = reg_to_register_object($dst$$reg); 2231 __ movr(Assembler::rc_nz, src_reg, 1, dst_reg); 2232 %} 2233 #endif 2234 2235 enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{ 2236 // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))) 2237 MacroAssembler _masm(&cbuf); 2238 2239 Register p_reg = reg_to_register_object($p$$reg); 2240 Register q_reg = reg_to_register_object($q$$reg); 2241 Register y_reg = reg_to_register_object($y$$reg); 2242 Register tmp_reg = reg_to_register_object($tmp$$reg); 2243 2244 __ subcc( p_reg, q_reg, p_reg ); 2245 __ add ( p_reg, y_reg, tmp_reg ); 2246 __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg ); 2247 %} 2248 2249 enc_class form_d2i_helper(regD src, regF dst) %{ 2250 // fcmp %fcc0,$src,$src 2251 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg ); 2252 // branch %fcc0 not-nan, predict taken 2253 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2254 // fdtoi $src,$dst 2255 emit3( cbuf, Assembler::arith_op , 
$dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg ); 2256 // fitos $dst,$dst (if nan) 2257 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg ); 2258 // clear $dst (if nan) 2259 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg ); 2260 // carry on here... 2261 %} 2262 2263 enc_class form_d2l_helper(regD src, regD dst) %{ 2264 // fcmp %fcc0,$src,$src check for NAN 2265 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg ); 2266 // branch %fcc0 not-nan, predict taken 2267 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2268 // fdtox $src,$dst convert in delay slot 2269 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg ); 2270 // fxtod $dst,$dst (if nan) 2271 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg ); 2272 // clear $dst (if nan) 2273 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg ); 2274 // carry on here... 
2275 %} 2276 2277 enc_class form_f2i_helper(regF src, regF dst) %{ 2278 // fcmps %fcc0,$src,$src 2279 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg ); 2280 // branch %fcc0 not-nan, predict taken 2281 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2282 // fstoi $src,$dst 2283 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg ); 2284 // fitos $dst,$dst (if nan) 2285 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg ); 2286 // clear $dst (if nan) 2287 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg ); 2288 // carry on here... 2289 %} 2290 2291 enc_class form_f2l_helper(regF src, regD dst) %{ 2292 // fcmps %fcc0,$src,$src 2293 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg ); 2294 // branch %fcc0 not-nan, predict taken 2295 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 ); 2296 // fstox $src,$dst 2297 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg ); 2298 // fxtod $dst,$dst (if nan) 2299 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg ); 2300 // clear $dst (if nan) 2301 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg ); 2302 // carry on here... 
2303 %} 2304 2305 enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2306 enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2307 enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2308 enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2309 2310 enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %} 2311 2312 enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %} 2313 enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %} 2314 2315 enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{ 2316 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2317 %} 2318 2319 enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, regD rd ) %{ 2320 emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2321 %} 2322 2323 enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{ 2324 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2325 %} 2326 2327 enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{ 2328 emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg ); 2329 %} 2330 2331 enc_class form3_convI2F(regF rs2, regF rd) %{ 2332 emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg); 2333 %} 2334 2335 // Encloding class for traceable jumps 2336 enc_class form_jmpl(g3RegP dest) %{ 2337 emit_jmpl(cbuf, $dest$$reg); 2338 %} 2339 2340 enc_class form_jmpl_set_exception_pc(g1RegP dest) %{ 2341 emit_jmpl_set_exception_pc(cbuf, 
$dest$$reg); 2342 %} 2343 2344 enc_class form2_nop() %{ 2345 emit_nop(cbuf); 2346 %} 2347 2348 enc_class form2_illtrap() %{ 2349 emit_illtrap(cbuf); 2350 %} 2351 2352 2353 // Compare longs and convert into -1, 0, 1. 2354 enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{ 2355 // CMP $src1,$src2 2356 emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg ); 2357 // blt,a,pn done 2358 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 ); 2359 // mov dst,-1 in delay slot 2360 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 ); 2361 // bgt,a,pn done 2362 emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 ); 2363 // mov dst,1 in delay slot 2364 emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 ); 2365 // CLR $dst 2366 emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 ); 2367 %} 2368 2369 enc_class enc_PartialSubtypeCheck() %{ 2370 MacroAssembler _masm(&cbuf); 2371 __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type); 2372 __ delayed()->nop(); 2373 %} 2374 2375 enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{ 2376 MacroAssembler _masm(&cbuf); 2377 Label* L = $labl$$label; 2378 Assembler::Predict predict_taken = 2379 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 2380 2381 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 2382 __ delayed()->nop(); 2383 %} 2384 2385 enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{ 2386 MacroAssembler _masm(&cbuf); 2387 Label* L = $labl$$label; 2388 Assembler::Predict predict_taken = 2389 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 2390 2391 __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L); 2392 __ delayed()->nop(); 2393 %} 2394 2395 enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{ 2396 int op = (Assembler::arith_op << 30) | 2397 ($dst$$reg << 25) | 2398 (Assembler::movcc_op3 << 19) | 2399 (1 << 18) | // cc2 bit for 'icc' 2400 ($cmp$$cmpcode << 14) | 2401 (0 << 13) | // select register move 2402 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' or 'xcc' 2403 ($src$$reg << 0); 2404 cbuf.insts()->emit_int32(op); 2405 %} 2406 2407 enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{ 2408 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits 2409 int op = (Assembler::arith_op << 30) | 2410 ($dst$$reg << 25) | 2411 (Assembler::movcc_op3 << 19) | 2412 (1 << 18) | // cc2 bit for 'icc' 2413 ($cmp$$cmpcode << 14) | 2414 (1 << 13) | // select immediate move 2415 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' 2416 (simm11 << 0); 2417 cbuf.insts()->emit_int32(op); 2418 %} 2419 2420 enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{ 2421 int op = (Assembler::arith_op << 30) | 2422 ($dst$$reg << 25) | 2423 (Assembler::movcc_op3 << 19) | 2424 (0 << 18) | // cc2 bit for 'fccX' 2425 ($cmp$$cmpcode << 14) | 2426 (0 << 13) | // select register move 2427 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3 2428 ($src$$reg << 0); 2429 cbuf.insts()->emit_int32(op); 2430 %} 2431 2432 enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{ 2433 int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits 2434 int op = (Assembler::arith_op << 30) | 2435 ($dst$$reg << 25) | 2436 (Assembler::movcc_op3 << 19) | 2437 (0 << 18) | // cc2 bit for 'fccX' 2438 ($cmp$$cmpcode << 14) | 2439 (1 << 13) | // select immediate move 2440 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3 2441 (simm11 << 0); 2442 cbuf.insts()->emit_int32(op); 2443 %} 
2444 2445 enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{ 2446 int op = (Assembler::arith_op << 30) | 2447 ($dst$$reg << 25) | 2448 (Assembler::fpop2_op3 << 19) | 2449 (0 << 18) | 2450 ($cmp$$cmpcode << 14) | 2451 (1 << 13) | // select register move 2452 ($pcc$$constant << 11) | // cc1-cc0 bits for 'icc' or 'xcc' 2453 ($primary << 5) | // select single, double or quad 2454 ($src$$reg << 0); 2455 cbuf.insts()->emit_int32(op); 2456 %} 2457 2458 enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{ 2459 int op = (Assembler::arith_op << 30) | 2460 ($dst$$reg << 25) | 2461 (Assembler::fpop2_op3 << 19) | 2462 (0 << 18) | 2463 ($cmp$$cmpcode << 14) | 2464 ($fcc$$reg << 11) | // cc2-cc0 bits for 'fccX' 2465 ($primary << 5) | // select single, double or quad 2466 ($src$$reg << 0); 2467 cbuf.insts()->emit_int32(op); 2468 %} 2469 2470 // Used by the MIN/MAX encodings. Same as a CMOV, but 2471 // the condition comes from opcode-field instead of an argument. 
2472 enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{ 2473 int op = (Assembler::arith_op << 30) | 2474 ($dst$$reg << 25) | 2475 (Assembler::movcc_op3 << 19) | 2476 (1 << 18) | // cc2 bit for 'icc' 2477 ($primary << 14) | 2478 (0 << 13) | // select register move 2479 (0 << 11) | // cc1, cc0 bits for 'icc' 2480 ($src$$reg << 0); 2481 cbuf.insts()->emit_int32(op); 2482 %} 2483 2484 enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{ 2485 int op = (Assembler::arith_op << 30) | 2486 ($dst$$reg << 25) | 2487 (Assembler::movcc_op3 << 19) | 2488 (6 << 16) | // cc2 bit for 'xcc' 2489 ($primary << 14) | 2490 (0 << 13) | // select register move 2491 (0 << 11) | // cc1, cc0 bits for 'icc' 2492 ($src$$reg << 0); 2493 cbuf.insts()->emit_int32(op); 2494 %} 2495 2496 enc_class Set13( immI13 src, iRegI rd ) %{ 2497 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant ); 2498 %} 2499 2500 enc_class SetHi22( immI src, iRegI rd ) %{ 2501 emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant ); 2502 %} 2503 2504 enc_class Set32( immI src, iRegI rd ) %{ 2505 MacroAssembler _masm(&cbuf); 2506 __ set($src$$constant, reg_to_register_object($rd$$reg)); 2507 %} 2508 2509 enc_class call_epilog %{ 2510 if( VerifyStackAtCalls ) { 2511 MacroAssembler _masm(&cbuf); 2512 int framesize = ra_->C->frame_slots() << LogBytesPerInt; 2513 Register temp_reg = G3; 2514 __ add(SP, framesize, temp_reg); 2515 __ cmp(temp_reg, FP); 2516 __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc); 2517 } 2518 %} 2519 2520 // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value 2521 // to G1 so the register allocator will not have to deal with the misaligned register 2522 // pair. 
2523 enc_class adjust_long_from_native_call %{ 2524 #ifndef _LP64 2525 if (returns_long()) { 2526 // sllx O0,32,O0 2527 emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 ); 2528 // srl O1,0,O1 2529 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 ); 2530 // or O0,O1,G1 2531 emit3 ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc ); 2532 } 2533 #endif 2534 %} 2535 2536 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime 2537 // CALL directly to the runtime 2538 // The user of this is responsible for ensuring that R_L7 is empty (killed). 2539 emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type, 2540 /*preserve_g2=*/true); 2541 %} 2542 2543 enc_class preserve_SP %{ 2544 MacroAssembler _masm(&cbuf); 2545 __ mov(SP, L7_mh_SP_save); 2546 %} 2547 2548 enc_class restore_SP %{ 2549 MacroAssembler _masm(&cbuf); 2550 __ mov(L7_mh_SP_save, SP); 2551 %} 2552 2553 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL 2554 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 2555 // who we intended to call. 2556 if (!_method) { 2557 emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type); 2558 } else if (_optimized_virtual) { 2559 emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type); 2560 } else { 2561 emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type); 2562 } 2563 if (_method) { // Emit stub for static call. 
2564 CompiledStaticCall::emit_to_interp_stub(cbuf); 2565 } 2566 %} 2567 2568 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL 2569 MacroAssembler _masm(&cbuf); 2570 __ set_inst_mark(); 2571 int vtable_index = this->_vtable_index; 2572 // MachCallDynamicJavaNode::ret_addr_offset uses this same test 2573 if (vtable_index < 0) { 2574 // must be invalid_vtable_index, not nonvirtual_vtable_index 2575 assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value"); 2576 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode()); 2577 assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()"); 2578 assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub"); 2579 __ ic_call((address)$meth$$method); 2580 } else { 2581 assert(!UseInlineCaches, "expect vtable calls only if not using ICs"); 2582 // Just go thru the vtable 2583 // get receiver klass (receiver already checked for non-null) 2584 // If we end up going thru a c2i adapter interpreter expects method in G5 2585 int off = __ offset(); 2586 __ load_klass(O0, G3_scratch); 2587 int klass_load_size; 2588 if (UseCompressedClassPointers) { 2589 assert(Universe::heap() != NULL, "java heap should be initialized"); 2590 klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord; 2591 } else { 2592 klass_load_size = 1*BytesPerInstWord; 2593 } 2594 int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); 2595 int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); 2596 if (Assembler::is_simm13(v_off)) { 2597 __ ld_ptr(G3, v_off, G5_method); 2598 } else { 2599 // Generate 2 instructions 2600 __ Assembler::sethi(v_off & ~0x3ff, G5_method); 2601 __ or3(G5_method, v_off & 0x3ff, G5_method); 2602 // ld_ptr, set_hi, set 2603 assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord, 2604 "Unexpected instruction 
size(s)"); 2605 __ ld_ptr(G3, G5_method, G5_method); 2606 } 2607 // NOTE: for vtable dispatches, the vtable entry will never be null. 2608 // However it may very well end up in handle_wrong_method if the 2609 // method is abstract for the particular class. 2610 __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch); 2611 // jump to target (either compiled code or c2iadapter) 2612 __ jmpl(G3_scratch, G0, O7); 2613 __ delayed()->nop(); 2614 } 2615 %} 2616 2617 enc_class Java_Compiled_Call (method meth) %{ // JAVA COMPILED CALL 2618 MacroAssembler _masm(&cbuf); 2619 2620 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode()); 2621 Register temp_reg = G3; // caller must kill G3! We cannot reuse G5_ic_reg here because 2622 // we might be calling a C2I adapter which needs it. 2623 2624 assert(temp_reg != G5_ic_reg, "conflicting registers"); 2625 // Load nmethod 2626 __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg); 2627 2628 // CALL to compiled java, indirect the contents of G3 2629 __ set_inst_mark(); 2630 __ callr(temp_reg, G0); 2631 __ delayed()->nop(); 2632 %} 2633 2634 enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{ 2635 MacroAssembler _masm(&cbuf); 2636 Register Rdividend = reg_to_register_object($src1$$reg); 2637 Register Rdivisor = reg_to_register_object($src2$$reg); 2638 Register Rresult = reg_to_register_object($dst$$reg); 2639 2640 __ sra(Rdivisor, 0, Rdivisor); 2641 __ sra(Rdividend, 0, Rdividend); 2642 __ sdivx(Rdividend, Rdivisor, Rresult); 2643 %} 2644 2645 enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{ 2646 MacroAssembler _masm(&cbuf); 2647 2648 Register Rdividend = reg_to_register_object($src1$$reg); 2649 int divisor = $imm$$constant; 2650 Register Rresult = reg_to_register_object($dst$$reg); 2651 2652 __ sra(Rdividend, 0, Rdividend); 2653 __ sdivx(Rdividend, divisor, Rresult); 2654 %} 2655 2656 enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, 
iRegIsafe src2) %{ 2657 MacroAssembler _masm(&cbuf); 2658 Register Rsrc1 = reg_to_register_object($src1$$reg); 2659 Register Rsrc2 = reg_to_register_object($src2$$reg); 2660 Register Rdst = reg_to_register_object($dst$$reg); 2661 2662 __ sra( Rsrc1, 0, Rsrc1 ); 2663 __ sra( Rsrc2, 0, Rsrc2 ); 2664 __ mulx( Rsrc1, Rsrc2, Rdst ); 2665 __ srlx( Rdst, 32, Rdst ); 2666 %} 2667 2668 enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{ 2669 MacroAssembler _masm(&cbuf); 2670 Register Rdividend = reg_to_register_object($src1$$reg); 2671 Register Rdivisor = reg_to_register_object($src2$$reg); 2672 Register Rresult = reg_to_register_object($dst$$reg); 2673 Register Rscratch = reg_to_register_object($scratch$$reg); 2674 2675 assert(Rdividend != Rscratch, ""); 2676 assert(Rdivisor != Rscratch, ""); 2677 2678 __ sra(Rdividend, 0, Rdividend); 2679 __ sra(Rdivisor, 0, Rdivisor); 2680 __ sdivx(Rdividend, Rdivisor, Rscratch); 2681 __ mulx(Rscratch, Rdivisor, Rscratch); 2682 __ sub(Rdividend, Rscratch, Rresult); 2683 %} 2684 2685 enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{ 2686 MacroAssembler _masm(&cbuf); 2687 2688 Register Rdividend = reg_to_register_object($src1$$reg); 2689 int divisor = $imm$$constant; 2690 Register Rresult = reg_to_register_object($dst$$reg); 2691 Register Rscratch = reg_to_register_object($scratch$$reg); 2692 2693 assert(Rdividend != Rscratch, ""); 2694 2695 __ sra(Rdividend, 0, Rdividend); 2696 __ sdivx(Rdividend, divisor, Rscratch); 2697 __ mulx(Rscratch, divisor, Rscratch); 2698 __ sub(Rdividend, Rscratch, Rresult); 2699 %} 2700 2701 enc_class fabss (sflt_reg dst, sflt_reg src) %{ 2702 MacroAssembler _masm(&cbuf); 2703 2704 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg); 2705 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg); 2706 2707 __ fabs(FloatRegisterImpl::S, Fsrc, Fdst); 2708 %} 2709 2710 enc_class fabsd (dflt_reg dst, dflt_reg src) %{ 2711 
MacroAssembler _masm(&cbuf); 2712 2713 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2714 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2715 2716 __ fabs(FloatRegisterImpl::D, Fsrc, Fdst); 2717 %} 2718 2719 enc_class fnegd (dflt_reg dst, dflt_reg src) %{ 2720 MacroAssembler _masm(&cbuf); 2721 2722 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2723 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2724 2725 __ fneg(FloatRegisterImpl::D, Fsrc, Fdst); 2726 %} 2727 2728 enc_class fsqrts (sflt_reg dst, sflt_reg src) %{ 2729 MacroAssembler _masm(&cbuf); 2730 2731 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg); 2732 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg); 2733 2734 __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst); 2735 %} 2736 2737 enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{ 2738 MacroAssembler _masm(&cbuf); 2739 2740 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2741 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2742 2743 __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst); 2744 %} 2745 2746 enc_class fmovs (dflt_reg dst, dflt_reg src) %{ 2747 MacroAssembler _masm(&cbuf); 2748 2749 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg); 2750 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg); 2751 2752 __ fmov(FloatRegisterImpl::S, Fsrc, Fdst); 2753 %} 2754 2755 enc_class fmovd (dflt_reg dst, dflt_reg src) %{ 2756 MacroAssembler _masm(&cbuf); 2757 2758 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2759 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2760 2761 __ fmov(FloatRegisterImpl::D, Fsrc, Fdst); 2762 %} 2763 2764 enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{ 2765 MacroAssembler _masm(&cbuf); 2766 2767 Register Roop = reg_to_register_object($oop$$reg); 2768 Register Rbox = reg_to_register_object($box$$reg); 2769 
Register Rscratch = reg_to_register_object($scratch$$reg); 2770 Register Rmark = reg_to_register_object($scratch2$$reg); 2771 2772 assert(Roop != Rscratch, ""); 2773 assert(Roop != Rmark, ""); 2774 assert(Rbox != Rscratch, ""); 2775 assert(Rbox != Rmark, ""); 2776 2777 __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining); 2778 %} 2779 2780 enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{ 2781 MacroAssembler _masm(&cbuf); 2782 2783 Register Roop = reg_to_register_object($oop$$reg); 2784 Register Rbox = reg_to_register_object($box$$reg); 2785 Register Rscratch = reg_to_register_object($scratch$$reg); 2786 Register Rmark = reg_to_register_object($scratch2$$reg); 2787 2788 assert(Roop != Rscratch, ""); 2789 assert(Roop != Rmark, ""); 2790 assert(Rbox != Rscratch, ""); 2791 assert(Rbox != Rmark, ""); 2792 2793 __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining); 2794 %} 2795 2796 enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{ 2797 MacroAssembler _masm(&cbuf); 2798 Register Rmem = reg_to_register_object($mem$$reg); 2799 Register Rold = reg_to_register_object($old$$reg); 2800 Register Rnew = reg_to_register_object($new$$reg); 2801 2802 __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold 2803 __ cmp( Rold, Rnew ); 2804 %} 2805 2806 enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{ 2807 Register Rmem = reg_to_register_object($mem$$reg); 2808 Register Rold = reg_to_register_object($old$$reg); 2809 Register Rnew = reg_to_register_object($new$$reg); 2810 2811 MacroAssembler _masm(&cbuf); 2812 __ mov(Rnew, O7); 2813 __ casx(Rmem, Rold, O7); 2814 __ cmp( Rold, O7 ); 2815 %} 2816 2817 // raw int cas, used for compareAndSwap 2818 enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{ 2819 Register Rmem = reg_to_register_object($mem$$reg); 2820 Register Rold = reg_to_register_object($old$$reg); 2821 Register Rnew = 
reg_to_register_object($new$$reg); 2822 2823 MacroAssembler _masm(&cbuf); 2824 __ mov(Rnew, O7); 2825 __ cas(Rmem, Rold, O7); 2826 __ cmp( Rold, O7 ); 2827 %} 2828 2829 enc_class enc_lflags_ne_to_boolean( iRegI res ) %{ 2830 Register Rres = reg_to_register_object($res$$reg); 2831 2832 MacroAssembler _masm(&cbuf); 2833 __ mov(1, Rres); 2834 __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres ); 2835 %} 2836 2837 enc_class enc_iflags_ne_to_boolean( iRegI res ) %{ 2838 Register Rres = reg_to_register_object($res$$reg); 2839 2840 MacroAssembler _masm(&cbuf); 2841 __ mov(1, Rres); 2842 __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres ); 2843 %} 2844 2845 enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{ 2846 MacroAssembler _masm(&cbuf); 2847 Register Rdst = reg_to_register_object($dst$$reg); 2848 FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg) 2849 : reg_to_DoubleFloatRegister_object($src1$$reg); 2850 FloatRegister Fsrc2 = $primary ? 
reg_to_SingleFloatRegister_object($src2$$reg) 2851 : reg_to_DoubleFloatRegister_object($src2$$reg); 2852 2853 // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1) 2854 __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst); 2855 %} 2856 2857 2858 enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{ 2859 Label Ldone, Lloop; 2860 MacroAssembler _masm(&cbuf); 2861 2862 Register str1_reg = reg_to_register_object($str1$$reg); 2863 Register str2_reg = reg_to_register_object($str2$$reg); 2864 Register cnt1_reg = reg_to_register_object($cnt1$$reg); 2865 Register cnt2_reg = reg_to_register_object($cnt2$$reg); 2866 Register result_reg = reg_to_register_object($result$$reg); 2867 2868 assert(result_reg != str1_reg && 2869 result_reg != str2_reg && 2870 result_reg != cnt1_reg && 2871 result_reg != cnt2_reg , 2872 "need different registers"); 2873 2874 // Compute the minimum of the string lengths(str1_reg) and the 2875 // difference of the string lengths (stack) 2876 2877 // See if the lengths are different, and calculate min in str1_reg. 2878 // Stash diff in O7 in case we need it for a tie-breaker. 2879 Label Lskip; 2880 __ subcc(cnt1_reg, cnt2_reg, O7); 2881 __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit 2882 __ br(Assembler::greater, true, Assembler::pt, Lskip); 2883 // cnt2 is shorter, so use its count: 2884 __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit 2885 __ bind(Lskip); 2886 2887 // reallocate cnt1_reg, cnt2_reg, result_reg 2888 // Note: limit_reg holds the string length pre-scaled by 2 2889 Register limit_reg = cnt1_reg; 2890 Register chr2_reg = cnt2_reg; 2891 Register chr1_reg = result_reg; 2892 // str{12} are the base pointers 2893 2894 // Is the minimum length zero? 
2895 __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity 2896 __ br(Assembler::equal, true, Assembler::pn, Ldone); 2897 __ delayed()->mov(O7, result_reg); // result is difference in lengths 2898 2899 // Load first characters 2900 __ lduh(str1_reg, 0, chr1_reg); 2901 __ lduh(str2_reg, 0, chr2_reg); 2902 2903 // Compare first characters 2904 __ subcc(chr1_reg, chr2_reg, chr1_reg); 2905 __ br(Assembler::notZero, false, Assembler::pt, Ldone); 2906 assert(chr1_reg == result_reg, "result must be pre-placed"); 2907 __ delayed()->nop(); 2908 2909 { 2910 // Check after comparing first character to see if strings are equivalent 2911 Label LSkip2; 2912 // Check if the strings start at same location 2913 __ cmp(str1_reg, str2_reg); 2914 __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2); 2915 __ delayed()->nop(); 2916 2917 // Check if the length difference is zero (in O7) 2918 __ cmp(G0, O7); 2919 __ br(Assembler::equal, true, Assembler::pn, Ldone); 2920 __ delayed()->mov(G0, result_reg); // result is zero 2921 2922 // Strings might not be equal 2923 __ bind(LSkip2); 2924 } 2925 2926 // We have no guarantee that on 64 bit the higher half of limit_reg is 0 2927 __ signx(limit_reg); 2928 2929 __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg); 2930 __ br(Assembler::equal, true, Assembler::pn, Ldone); 2931 __ delayed()->mov(O7, result_reg); // result is difference in lengths 2932 2933 // Shift str1_reg and str2_reg to the end of the arrays, negate limit 2934 __ add(str1_reg, limit_reg, str1_reg); 2935 __ add(str2_reg, limit_reg, str2_reg); 2936 __ neg(chr1_reg, limit_reg); // limit = -(limit-2) 2937 2938 // Compare the rest of the characters 2939 __ lduh(str1_reg, limit_reg, chr1_reg); 2940 __ bind(Lloop); 2941 // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted 2942 __ lduh(str2_reg, limit_reg, chr2_reg); 2943 __ subcc(chr1_reg, chr2_reg, chr1_reg); 2944 __ br(Assembler::notZero, false, Assembler::pt, Ldone); 2945 assert(chr1_reg == 
result_reg, "result must be pre-placed"); 2946 __ delayed()->inccc(limit_reg, sizeof(jchar)); 2947 // annul LDUH if branch is not taken to prevent access past end of string 2948 __ br(Assembler::notZero, true, Assembler::pt, Lloop); 2949 __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted 2950 2951 // If strings are equal up to min length, return the length difference. 2952 __ mov(O7, result_reg); 2953 2954 // Otherwise, return the difference between the first mismatched chars. 2955 __ bind(Ldone); 2956 %} 2957 2958 enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{ 2959 Label Lword_loop, Lpost_word, Lchar, Lchar_loop, Ldone; 2960 MacroAssembler _masm(&cbuf); 2961 2962 Register str1_reg = reg_to_register_object($str1$$reg); 2963 Register str2_reg = reg_to_register_object($str2$$reg); 2964 Register cnt_reg = reg_to_register_object($cnt$$reg); 2965 Register tmp1_reg = O7; 2966 Register result_reg = reg_to_register_object($result$$reg); 2967 2968 assert(result_reg != str1_reg && 2969 result_reg != str2_reg && 2970 result_reg != cnt_reg && 2971 result_reg != tmp1_reg , 2972 "need different registers"); 2973 2974 __ cmp(str1_reg, str2_reg); //same char[] ? 2975 __ brx(Assembler::equal, true, Assembler::pn, Ldone); 2976 __ delayed()->add(G0, 1, result_reg); 2977 2978 __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn); 2979 __ delayed()->add(G0, 1, result_reg); // count == 0 2980 2981 //rename registers 2982 Register limit_reg = cnt_reg; 2983 Register chr1_reg = result_reg; 2984 Register chr2_reg = tmp1_reg; 2985 2986 // We have no guarantee that on 64 bit the higher half of limit_reg is 0 2987 __ signx(limit_reg); 2988 2989 //check for alignment and position the pointers to the ends 2990 __ or3(str1_reg, str2_reg, chr1_reg); 2991 __ andcc(chr1_reg, 0x3, chr1_reg); 2992 // notZero means at least one not 4-byte aligned. 
2993 // We could optimize the case when both arrays are not aligned 2994 // but it is not frequent case and it requires additional checks. 2995 __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare 2996 __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count 2997 2998 // Compare char[] arrays aligned to 4 bytes. 2999 __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg, 3000 chr1_reg, chr2_reg, Ldone); 3001 __ ba(Ldone); 3002 __ delayed()->add(G0, 1, result_reg); 3003 3004 // char by char compare 3005 __ bind(Lchar); 3006 __ add(str1_reg, limit_reg, str1_reg); 3007 __ add(str2_reg, limit_reg, str2_reg); 3008 __ neg(limit_reg); //negate count 3009 3010 __ lduh(str1_reg, limit_reg, chr1_reg); 3011 // Lchar_loop 3012 __ bind(Lchar_loop); 3013 __ lduh(str2_reg, limit_reg, chr2_reg); 3014 __ cmp(chr1_reg, chr2_reg); 3015 __ br(Assembler::notEqual, true, Assembler::pt, Ldone); 3016 __ delayed()->mov(G0, result_reg); //not equal 3017 __ inccc(limit_reg, sizeof(jchar)); 3018 // annul LDUH if branch is not taken to prevent access past end of string 3019 __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop); 3020 __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted 3021 3022 __ add(G0, 1, result_reg); //equal 3023 3024 __ bind(Ldone); 3025 %} 3026 3027 enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{ 3028 Label Lvector, Ldone, Lloop; 3029 MacroAssembler _masm(&cbuf); 3030 3031 Register ary1_reg = reg_to_register_object($ary1$$reg); 3032 Register ary2_reg = reg_to_register_object($ary2$$reg); 3033 Register tmp1_reg = reg_to_register_object($tmp1$$reg); 3034 Register tmp2_reg = O7; 3035 Register result_reg = reg_to_register_object($result$$reg); 3036 3037 int length_offset = arrayOopDesc::length_offset_in_bytes(); 3038 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); 3039 3040 // return true if the same array 3041 __ cmp(ary1_reg, ary2_reg); 
3042 __ brx(Assembler::equal, true, Assembler::pn, Ldone); 3043 __ delayed()->add(G0, 1, result_reg); // equal 3044 3045 __ br_null(ary1_reg, true, Assembler::pn, Ldone); 3046 __ delayed()->mov(G0, result_reg); // not equal 3047 3048 __ br_null(ary2_reg, true, Assembler::pn, Ldone); 3049 __ delayed()->mov(G0, result_reg); // not equal 3050 3051 //load the lengths of arrays 3052 __ ld(Address(ary1_reg, length_offset), tmp1_reg); 3053 __ ld(Address(ary2_reg, length_offset), tmp2_reg); 3054 3055 // return false if the two arrays are not equal length 3056 __ cmp(tmp1_reg, tmp2_reg); 3057 __ br(Assembler::notEqual, true, Assembler::pn, Ldone); 3058 __ delayed()->mov(G0, result_reg); // not equal 3059 3060 __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn); 3061 __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal 3062 3063 // load array addresses 3064 __ add(ary1_reg, base_offset, ary1_reg); 3065 __ add(ary2_reg, base_offset, ary2_reg); 3066 3067 // renaming registers 3068 Register chr1_reg = result_reg; // for characters in ary1 3069 Register chr2_reg = tmp2_reg; // for characters in ary2 3070 Register limit_reg = tmp1_reg; // length 3071 3072 // set byte count 3073 __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); 3074 3075 // Compare char[] arrays aligned to 4 bytes. 
3076 __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg, 3077 chr1_reg, chr2_reg, Ldone); 3078 __ add(G0, 1, result_reg); // equals 3079 3080 __ bind(Ldone); 3081 %} 3082 3083 enc_class enc_rethrow() %{ 3084 cbuf.set_insts_mark(); 3085 Register temp_reg = G3; 3086 AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub()); 3087 assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg"); 3088 MacroAssembler _masm(&cbuf); 3089 #ifdef ASSERT 3090 __ save_frame(0); 3091 AddressLiteral last_rethrow_addrlit(&last_rethrow); 3092 __ sethi(last_rethrow_addrlit, L1); 3093 Address addr(L1, last_rethrow_addrlit.low10()); 3094 __ rdpc(L2); 3095 __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to 3096 __ st_ptr(L2, addr); 3097 __ restore(); 3098 #endif 3099 __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp 3100 __ delayed()->nop(); 3101 %} 3102 3103 enc_class emit_mem_nop() %{ 3104 // Generates the instruction LDUXA [o6,g0],#0x82,g0 3105 cbuf.insts()->emit_int32((unsigned int) 0xc0839040); 3106 %} 3107 3108 enc_class emit_fadd_nop() %{ 3109 // Generates the instruction FMOVS f31,f31 3110 cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f); 3111 %} 3112 3113 enc_class emit_br_nop() %{ 3114 // Generates the instruction BPN,PN . 
3115 cbuf.insts()->emit_int32((unsigned int) 0x00400000); 3116 %} 3117 3118 enc_class enc_membar_acquire %{ 3119 MacroAssembler _masm(&cbuf); 3120 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) ); 3121 %} 3122 3123 enc_class enc_membar_release %{ 3124 MacroAssembler _masm(&cbuf); 3125 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) ); 3126 %} 3127 3128 enc_class enc_membar_volatile %{ 3129 MacroAssembler _masm(&cbuf); 3130 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) ); 3131 %} 3132 3133 %} 3134 3135 //----------FRAME-------------------------------------------------------------- 3136 // Definition of frame structure and management information. 3137 // 3138 // S T A C K L A Y O U T Allocators stack-slot number 3139 // | (to get allocators register number 3140 // G Owned by | | v add VMRegImpl::stack0) 3141 // r CALLER | | 3142 // o | +--------+ pad to even-align allocators stack-slot 3143 // w V | pad0 | numbers; owned by CALLER 3144 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned 3145 // h ^ | in | 5 3146 // | | args | 4 Holes in incoming args owned by SELF 3147 // | | | | 3 3148 // | | +--------+ 3149 // V | | old out| Empty on Intel, window on Sparc 3150 // | old |preserve| Must be even aligned. 3151 // | SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned 3152 // | | in | 3 area for Intel ret address 3153 // Owned by |preserve| Empty on Sparc. 
3154 // SELF +--------+ 3155 // | | pad2 | 2 pad to align old SP 3156 // | +--------+ 1 3157 // | | locks | 0 3158 // | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned 3159 // | | pad1 | 11 pad to align new SP 3160 // | +--------+ 3161 // | | | 10 3162 // | | spills | 9 spills 3163 // V | | 8 (pad0 slot for callee) 3164 // -----------+--------+----> Matcher::_out_arg_limit, unaligned 3165 // ^ | out | 7 3166 // | | args | 6 Holes in outgoing args owned by CALLEE 3167 // Owned by +--------+ 3168 // CALLEE | new out| 6 Empty on Intel, window on Sparc 3169 // | new |preserve| Must be even-aligned. 3170 // | SP-+--------+----> Matcher::_new_SP, even aligned 3171 // | | | 3172 // 3173 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is 3174 // known from SELF's arguments and the Java calling convention. 3175 // Region 6-7 is determined per call site. 3176 // Note 2: If the calling convention leaves holes in the incoming argument 3177 // area, those holes are owned by SELF. Holes in the outgoing area 3178 // are owned by the CALLEE. Holes should not be necessary in the 3179 // incoming area, as the Java calling convention is completely under 3180 // the control of the AD file. Doubles can be sorted and packed to 3181 // avoid holes. Holes in the outgoing arguments may be necessary for 3182 // varargs C calling conventions. 3183 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is 3184 // even aligned with pad0 as needed. 3185 // Region 6 is even aligned. Region 6-7 is NOT even aligned; 3186 // region 6-11 is even aligned; it may be padded out more so that 3187 // the region from SP to FP meets the minimum stack alignment. 3188 3189 frame %{ 3190 // What direction does stack grow in (assumed to be same for native & Java) 3191 stack_direction(TOWARDS_LOW); 3192 3193 // These two registers define part of the calling convention 3194 // between compiled code and the interpreter.
3195 inline_cache_reg(R_G5); // Inline Cache Register or Method* for I2C 3196 interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter 3197 3198 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset] 3199 cisc_spilling_operand_name(indOffset); 3200 3201 // Number of stack slots consumed by a Monitor enter 3202 #ifdef _LP64 3203 sync_stack_slots(2); 3204 #else 3205 sync_stack_slots(1); 3206 #endif 3207 3208 // Compiled code's Frame Pointer 3209 frame_pointer(R_SP); 3210 3211 // Stack alignment requirement 3212 stack_alignment(StackAlignmentInBytes); 3213 // LP64: Alignment size in bytes (128-bit -> 16 bytes) 3214 // !LP64: Alignment size in bytes (64-bit -> 8 bytes) 3215 3216 // Number of stack slots between incoming argument block and the start of 3217 // a new frame. The PROLOG must add this many slots to the stack. The 3218 // EPILOG must remove this many slots. 3219 in_preserve_stack_slots(0); 3220 3221 // Number of outgoing stack slots killed above the out_preserve_stack_slots 3222 // for calls to C. Supports the var-args backing area for register parms. 3223 // ADLC doesn't support parsing expressions, so I folded the math by hand. 3224 #ifdef _LP64 3225 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word 3226 varargs_C_out_slots_killed(12); 3227 #else 3228 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word 3229 varargs_C_out_slots_killed( 7); 3230 #endif 3231 3232 // The after-PROLOG location of the return address. Location of 3233 // return address specifies a type (REG or STACK) and a number 3234 // representing the register number (i.e. - use a register name) or 3235 // stack slot. 
3236 return_addr(REG R_I7); // Ret Addr is in register I7 3237 3238 // Body of function which returns an OptoRegs array locating 3239 // arguments either in registers or in stack slots for calling 3240 // java 3241 calling_convention %{ 3242 (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing); 3243 3244 %} 3245 3246 // Body of function which returns an OptoRegs array locating 3247 // arguments either in registers or in stack slots for calling 3248 // C. 3249 c_calling_convention %{ 3250 // This is obviously always outgoing 3251 (void) SharedRuntime::c_calling_convention(sig_bt, regs, length); 3252 %} 3253 3254 // Location of native (C/C++) and interpreter return values. This is specified to 3255 // be the same as Java. In the 32-bit VM, long values are actually returned from 3256 // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying 3257 // to and from the register pairs is done by the appropriate call and epilog 3258 // opcodes. This simplifies the register allocator.
3259 c_return_value %{ 3260 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); 3261 #ifdef _LP64 3262 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; 3263 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; 3264 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; 3265 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; 3266 #else // !_LP64 3267 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; 3268 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; 3269 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; 3270 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; 3271 #endif 3272 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], 3273 (is_outgoing?lo_out:lo_in)[ideal_reg] ); 3274 %} 3275 3276 // Location of compiled Java return values. 
Same as C 3277 return_value %{ 3278 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); 3279 #ifdef _LP64 3280 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; 3281 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; 3282 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; 3283 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; 3284 #else // !_LP64 3285 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; 3286 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; 3287 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; 3288 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; 3289 #endif 3290 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], 3291 (is_outgoing?lo_out:lo_in)[ideal_reg] ); 3292 %} 3293 3294 %} 3295 3296 3297 //----------ATTRIBUTES--------------------------------------------------------- 3298 //----------Operand Attributes------------------------------------------------- 3299 op_attrib op_cost(1); // Required cost attribute 3300 3301 //----------Instruction Attributes--------------------------------------------- 3302 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute 3303 ins_attrib ins_size(32); // Required size attribute (in bits) 3304 ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back 3305 ins_attrib ins_short_branch(0); // Required flag: is 
this instruction a 3306 // non-matching short branch variant of some 3307 // long branch? 3308 3309 //----------OPERANDS----------------------------------------------------------- 3310 // Operand definitions must precede instruction definitions for correct parsing 3311 // in the ADLC because operands constitute user defined types which are used in 3312 // instruction definitions. 3313 3314 //----------Simple Operands---------------------------------------------------- 3315 // Immediate Operands 3316 // Integer Immediate: 32-bit 3317 operand immI() %{ 3318 match(ConI); 3319 3320 op_cost(0); 3321 // formats are generated automatically for constants and base registers 3322 format %{ %} 3323 interface(CONST_INTER); 3324 %} 3325 3326 // Integer Immediate: 8-bit 3327 operand immI8() %{ 3328 predicate(Assembler::is_simm8(n->get_int())); 3329 match(ConI); 3330 op_cost(0); 3331 format %{ %} 3332 interface(CONST_INTER); 3333 %} 3334 3335 // Integer Immediate: 13-bit 3336 operand immI13() %{ 3337 predicate(Assembler::is_simm13(n->get_int())); 3338 match(ConI); 3339 op_cost(0); 3340 3341 format %{ %} 3342 interface(CONST_INTER); 3343 %} 3344 3345 // Integer Immediate: 13-bit minus 7 3346 operand immI13m7() %{ 3347 predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095)); 3348 match(ConI); 3349 op_cost(0); 3350 3351 format %{ %} 3352 interface(CONST_INTER); 3353 %} 3354 3355 // Integer Immediate: 16-bit 3356 operand immI16() %{ 3357 predicate(Assembler::is_simm16(n->get_int())); 3358 match(ConI); 3359 op_cost(0); 3360 format %{ %} 3361 interface(CONST_INTER); 3362 %} 3363 3364 // Unsigned (positive) Integer Immediate: 13-bit 3365 operand immU13() %{ 3366 predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int())); 3367 match(ConI); 3368 op_cost(0); 3369 3370 format %{ %} 3371 interface(CONST_INTER); 3372 %} 3373 3374 // Integer Immediate: 6-bit 3375 operand immU6() %{ 3376 predicate(n->get_int() >= 0 && n->get_int() <= 63); 3377 match(ConI); 3378 op_cost(0); 
3379 format %{ %} 3380 interface(CONST_INTER); 3381 %} 3382 3383 // Integer Immediate: 11-bit 3384 operand immI11() %{ 3385 predicate(Assembler::is_simm11(n->get_int())); 3386 match(ConI); 3387 op_cost(0); 3388 format %{ %} 3389 interface(CONST_INTER); 3390 %} 3391 3392 // Integer Immediate: 5-bit 3393 operand immI5() %{ 3394 predicate(Assembler::is_simm5(n->get_int())); 3395 match(ConI); 3396 op_cost(0); 3397 format %{ %} 3398 interface(CONST_INTER); 3399 %} 3400 3401 // Integer Immediate: 0-bit 3402 operand immI0() %{ 3403 predicate(n->get_int() == 0); 3404 match(ConI); 3405 op_cost(0); 3406 3407 format %{ %} 3408 interface(CONST_INTER); 3409 %} 3410 3411 // Integer Immediate: the value 10 3412 operand immI10() %{ 3413 predicate(n->get_int() == 10); 3414 match(ConI); 3415 op_cost(0); 3416 3417 format %{ %} 3418 interface(CONST_INTER); 3419 %} 3420 3421 // Integer Immediate: the values 0-31 3422 operand immU5() %{ 3423 predicate(n->get_int() >= 0 && n->get_int() <= 31); 3424 match(ConI); 3425 op_cost(0); 3426 3427 format %{ %} 3428 interface(CONST_INTER); 3429 %} 3430 3431 // Integer Immediate: the values 1-31 3432 operand immI_1_31() %{ 3433 predicate(n->get_int() >= 1 && n->get_int() <= 31); 3434 match(ConI); 3435 op_cost(0); 3436 3437 format %{ %} 3438 interface(CONST_INTER); 3439 %} 3440 3441 // Integer Immediate: the values 32-63 3442 operand immI_32_63() %{ 3443 predicate(n->get_int() >= 32 && n->get_int() <= 63); 3444 match(ConI); 3445 op_cost(0); 3446 3447 format %{ %} 3448 interface(CONST_INTER); 3449 %} 3450 3451 // Immediates for special shifts (sign extend) 3452 3453 // Integer Immediate: the value 16 3454 operand immI_16() %{ 3455 predicate(n->get_int() == 16); 3456 match(ConI); 3457 op_cost(0); 3458 3459 format %{ %} 3460 interface(CONST_INTER); 3461 %} 3462 3463 // Integer Immediate: the value 24 3464 operand immI_24() %{ 3465 predicate(n->get_int() == 24); 3466 match(ConI); 3467 op_cost(0); 3468 3469 format %{ %} 3470 interface(CONST_INTER); 3471 
%} 3472 3473 // Integer Immediate: the value 255 3474 operand immI_255() %{ 3475 predicate( n->get_int() == 255 ); 3476 match(ConI); 3477 op_cost(0); 3478 3479 format %{ %} 3480 interface(CONST_INTER); 3481 %} 3482 3483 // Integer Immediate: the value 65535 3484 operand immI_65535() %{ 3485 predicate(n->get_int() == 65535); 3486 match(ConI); 3487 op_cost(0); 3488 3489 format %{ %} 3490 interface(CONST_INTER); 3491 %} 3492 3493 // Long Immediate: the value FF 3494 operand immL_FF() %{ 3495 predicate( n->get_long() == 0xFFL ); 3496 match(ConL); 3497 op_cost(0); 3498 3499 format %{ %} 3500 interface(CONST_INTER); 3501 %} 3502 3503 // Long Immediate: the value FFFF 3504 operand immL_FFFF() %{ 3505 predicate( n->get_long() == 0xFFFFL ); 3506 match(ConL); 3507 op_cost(0); 3508 3509 format %{ %} 3510 interface(CONST_INTER); 3511 %} 3512 3513 // Pointer Immediate: 32 or 64-bit 3514 operand immP() %{ 3515 match(ConP); 3516 3517 op_cost(5); 3518 // formats are generated automatically for constants and base registers 3519 format %{ %} 3520 interface(CONST_INTER); 3521 %} 3522 3523 #ifdef _LP64 3524 // Pointer Immediate: 64-bit 3525 operand immP_set() %{ 3526 predicate(!VM_Version::is_niagara_plus()); 3527 match(ConP); 3528 3529 op_cost(5); 3530 // formats are generated automatically for constants and base registers 3531 format %{ %} 3532 interface(CONST_INTER); 3533 %} 3534 3535 // Pointer Immediate: 64-bit 3536 // From Niagara2 processors on a load should be better than materializing. 
3537 operand immP_load() %{ 3538 predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3))); 3539 match(ConP); 3540 3541 op_cost(5); 3542 // formats are generated automatically for constants and base registers 3543 format %{ %} 3544 interface(CONST_INTER); 3545 %} 3546 3547 // Pointer Immediate: 64-bit 3548 operand immP_no_oop_cheap() %{ 3549 predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3)); 3550 match(ConP); 3551 3552 op_cost(5); 3553 // formats are generated automatically for constants and base registers 3554 format %{ %} 3555 interface(CONST_INTER); 3556 %} 3557 #endif 3558 3559 operand immP13() %{ 3560 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095)); 3561 match(ConP); 3562 op_cost(0); 3563 3564 format %{ %} 3565 interface(CONST_INTER); 3566 %} 3567 3568 operand immP0() %{ 3569 predicate(n->get_ptr() == 0); 3570 match(ConP); 3571 op_cost(0); 3572 3573 format %{ %} 3574 interface(CONST_INTER); 3575 %} 3576 3577 operand immP_poll() %{ 3578 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page()); 3579 match(ConP); 3580 3581 // formats are generated automatically for constants and base registers 3582 format %{ %} 3583 interface(CONST_INTER); 3584 %} 3585 3586 // Pointer Immediate 3587 operand immN() 3588 %{ 3589 match(ConN); 3590 3591 op_cost(10); 3592 format %{ %} 3593 interface(CONST_INTER); 3594 %} 3595 3596 operand immNKlass() 3597 %{ 3598 match(ConNKlass); 3599 3600 op_cost(10); 3601 format %{ %} 3602 interface(CONST_INTER); 3603 %} 3604 3605 // NULL Pointer Immediate 3606 operand immN0() 3607 %{ 3608 predicate(n->get_narrowcon() == 0); 3609 match(ConN); 3610 3611 op_cost(0); 3612 format %{ %} 3613 interface(CONST_INTER); 3614 %} 3615 3616 operand immL() %{ 3617 match(ConL); 3618 op_cost(40); 3619 // formats are generated automatically for constants and base registers 3620 
format %{ %} 3621 interface(CONST_INTER); 3622 %} 3623 3624 operand immL0() %{ 3625 predicate(n->get_long() == 0L); 3626 match(ConL); 3627 op_cost(0); 3628 // formats are generated automatically for constants and base registers 3629 format %{ %} 3630 interface(CONST_INTER); 3631 %} 3632 3633 // Integer Immediate: 5-bit 3634 operand immL5() %{ 3635 predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long())); 3636 match(ConL); 3637 op_cost(0); 3638 format %{ %} 3639 interface(CONST_INTER); 3640 %} 3641 3642 // Long Immediate: 13-bit 3643 operand immL13() %{ 3644 predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L)); 3645 match(ConL); 3646 op_cost(0); 3647 3648 format %{ %} 3649 interface(CONST_INTER); 3650 %} 3651 3652 // Long Immediate: 13-bit minus 7 3653 operand immL13m7() %{ 3654 predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L)); 3655 match(ConL); 3656 op_cost(0); 3657 3658 format %{ %} 3659 interface(CONST_INTER); 3660 %} 3661 3662 // Long Immediate: low 32-bit mask 3663 operand immL_32bits() %{ 3664 predicate(n->get_long() == 0xFFFFFFFFL); 3665 match(ConL); 3666 op_cost(0); 3667 3668 format %{ %} 3669 interface(CONST_INTER); 3670 %} 3671 3672 // Long Immediate: cheap (materialize in <= 3 instructions) 3673 operand immL_cheap() %{ 3674 predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3); 3675 match(ConL); 3676 op_cost(0); 3677 3678 format %{ %} 3679 interface(CONST_INTER); 3680 %} 3681 3682 // Long Immediate: expensive (materialize in > 3 instructions) 3683 operand immL_expensive() %{ 3684 predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3); 3685 match(ConL); 3686 op_cost(0); 3687 3688 format %{ %} 3689 interface(CONST_INTER); 3690 %} 3691 3692 // Double Immediate 3693 operand immD() %{ 3694 match(ConD); 3695 3696 op_cost(40); 3697 format %{ %} 3698 interface(CONST_INTER); 3699 %} 3700 3701 operand immD0() %{ 3702 
#ifdef _LP64 3703 // on 64-bit architectures this comparision is faster 3704 predicate(jlong_cast(n->getd()) == 0); 3705 #else 3706 predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO)); 3707 #endif 3708 match(ConD); 3709 3710 op_cost(0); 3711 format %{ %} 3712 interface(CONST_INTER); 3713 %} 3714 3715 // Float Immediate 3716 operand immF() %{ 3717 match(ConF); 3718 3719 op_cost(20); 3720 format %{ %} 3721 interface(CONST_INTER); 3722 %} 3723 3724 // Float Immediate: 0 3725 operand immF0() %{ 3726 predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO)); 3727 match(ConF); 3728 3729 op_cost(0); 3730 format %{ %} 3731 interface(CONST_INTER); 3732 %} 3733 3734 // Integer Register Operands 3735 // Integer Register 3736 operand iRegI() %{ 3737 constraint(ALLOC_IN_RC(int_reg)); 3738 match(RegI); 3739 3740 match(notemp_iRegI); 3741 match(g1RegI); 3742 match(o0RegI); 3743 match(iRegIsafe); 3744 3745 format %{ %} 3746 interface(REG_INTER); 3747 %} 3748 3749 operand notemp_iRegI() %{ 3750 constraint(ALLOC_IN_RC(notemp_int_reg)); 3751 match(RegI); 3752 3753 match(o0RegI); 3754 3755 format %{ %} 3756 interface(REG_INTER); 3757 %} 3758 3759 operand o0RegI() %{ 3760 constraint(ALLOC_IN_RC(o0_regI)); 3761 match(iRegI); 3762 3763 format %{ %} 3764 interface(REG_INTER); 3765 %} 3766 3767 // Pointer Register 3768 operand iRegP() %{ 3769 constraint(ALLOC_IN_RC(ptr_reg)); 3770 match(RegP); 3771 3772 match(lock_ptr_RegP); 3773 match(g1RegP); 3774 match(g2RegP); 3775 match(g3RegP); 3776 match(g4RegP); 3777 match(i0RegP); 3778 match(o0RegP); 3779 match(o1RegP); 3780 match(l7RegP); 3781 3782 format %{ %} 3783 interface(REG_INTER); 3784 %} 3785 3786 operand sp_ptr_RegP() %{ 3787 constraint(ALLOC_IN_RC(sp_ptr_reg)); 3788 match(RegP); 3789 match(iRegP); 3790 3791 format %{ %} 3792 interface(REG_INTER); 3793 %} 3794 3795 operand lock_ptr_RegP() %{ 3796 constraint(ALLOC_IN_RC(lock_ptr_reg)); 3797 match(RegP); 3798 match(i0RegP); 3799 match(o0RegP); 3800 match(o1RegP); 3801 
match(l7RegP); 3802 3803 format %{ %} 3804 interface(REG_INTER); 3805 %} 3806 3807 operand g1RegP() %{ 3808 constraint(ALLOC_IN_RC(g1_regP)); 3809 match(iRegP); 3810 3811 format %{ %} 3812 interface(REG_INTER); 3813 %} 3814 3815 operand g2RegP() %{ 3816 constraint(ALLOC_IN_RC(g2_regP)); 3817 match(iRegP); 3818 3819 format %{ %} 3820 interface(REG_INTER); 3821 %} 3822 3823 operand g3RegP() %{ 3824 constraint(ALLOC_IN_RC(g3_regP)); 3825 match(iRegP); 3826 3827 format %{ %} 3828 interface(REG_INTER); 3829 %} 3830 3831 operand g1RegI() %{ 3832 constraint(ALLOC_IN_RC(g1_regI)); 3833 match(iRegI); 3834 3835 format %{ %} 3836 interface(REG_INTER); 3837 %} 3838 3839 operand g3RegI() %{ 3840 constraint(ALLOC_IN_RC(g3_regI)); 3841 match(iRegI); 3842 3843 format %{ %} 3844 interface(REG_INTER); 3845 %} 3846 3847 operand g4RegI() %{ 3848 constraint(ALLOC_IN_RC(g4_regI)); 3849 match(iRegI); 3850 3851 format %{ %} 3852 interface(REG_INTER); 3853 %} 3854 3855 operand g4RegP() %{ 3856 constraint(ALLOC_IN_RC(g4_regP)); 3857 match(iRegP); 3858 3859 format %{ %} 3860 interface(REG_INTER); 3861 %} 3862 3863 operand i0RegP() %{ 3864 constraint(ALLOC_IN_RC(i0_regP)); 3865 match(iRegP); 3866 3867 format %{ %} 3868 interface(REG_INTER); 3869 %} 3870 3871 operand o0RegP() %{ 3872 constraint(ALLOC_IN_RC(o0_regP)); 3873 match(iRegP); 3874 3875 format %{ %} 3876 interface(REG_INTER); 3877 %} 3878 3879 operand o1RegP() %{ 3880 constraint(ALLOC_IN_RC(o1_regP)); 3881 match(iRegP); 3882 3883 format %{ %} 3884 interface(REG_INTER); 3885 %} 3886 3887 operand o2RegP() %{ 3888 constraint(ALLOC_IN_RC(o2_regP)); 3889 match(iRegP); 3890 3891 format %{ %} 3892 interface(REG_INTER); 3893 %} 3894 3895 operand o7RegP() %{ 3896 constraint(ALLOC_IN_RC(o7_regP)); 3897 match(iRegP); 3898 3899 format %{ %} 3900 interface(REG_INTER); 3901 %} 3902 3903 operand l7RegP() %{ 3904 constraint(ALLOC_IN_RC(l7_regP)); 3905 match(iRegP); 3906 3907 format %{ %} 3908 interface(REG_INTER); 3909 %} 3910 3911 operand o7RegI() 
%{ 3912 constraint(ALLOC_IN_RC(o7_regI)); 3913 match(iRegI); 3914 3915 format %{ %} 3916 interface(REG_INTER); 3917 %} 3918 3919 operand iRegN() %{ 3920 constraint(ALLOC_IN_RC(int_reg)); 3921 match(RegN); 3922 3923 format %{ %} 3924 interface(REG_INTER); 3925 %} 3926 3927 // Long Register 3928 operand iRegL() %{ 3929 constraint(ALLOC_IN_RC(long_reg)); 3930 match(RegL); 3931 3932 format %{ %} 3933 interface(REG_INTER); 3934 %} 3935 3936 operand o2RegL() %{ 3937 constraint(ALLOC_IN_RC(o2_regL)); 3938 match(iRegL); 3939 3940 format %{ %} 3941 interface(REG_INTER); 3942 %} 3943 3944 operand o7RegL() %{ 3945 constraint(ALLOC_IN_RC(o7_regL)); 3946 match(iRegL); 3947 3948 format %{ %} 3949 interface(REG_INTER); 3950 %} 3951 3952 operand g1RegL() %{ 3953 constraint(ALLOC_IN_RC(g1_regL)); 3954 match(iRegL); 3955 3956 format %{ %} 3957 interface(REG_INTER); 3958 %} 3959 3960 operand g3RegL() %{ 3961 constraint(ALLOC_IN_RC(g3_regL)); 3962 match(iRegL); 3963 3964 format %{ %} 3965 interface(REG_INTER); 3966 %} 3967 3968 // Int Register safe 3969 // This is 64bit safe 3970 operand iRegIsafe() %{ 3971 constraint(ALLOC_IN_RC(long_reg)); 3972 3973 match(iRegI); 3974 3975 format %{ %} 3976 interface(REG_INTER); 3977 %} 3978 3979 // Condition Code Flag Register 3980 operand flagsReg() %{ 3981 constraint(ALLOC_IN_RC(int_flags)); 3982 match(RegFlags); 3983 3984 format %{ "ccr" %} // both ICC and XCC 3985 interface(REG_INTER); 3986 %} 3987 3988 // Condition Code Register, unsigned comparisons. 3989 operand flagsRegU() %{ 3990 constraint(ALLOC_IN_RC(int_flags)); 3991 match(RegFlags); 3992 3993 format %{ "icc_U" %} 3994 interface(REG_INTER); 3995 %} 3996 3997 // Condition Code Register, pointer comparisons. 3998 operand flagsRegP() %{ 3999 constraint(ALLOC_IN_RC(int_flags)); 4000 match(RegFlags); 4001 4002 #ifdef _LP64 4003 format %{ "xcc_P" %} 4004 #else 4005 format %{ "icc_P" %} 4006 #endif 4007 interface(REG_INTER); 4008 %} 4009 4010 // Condition Code Register, long comparisons. 
4011 operand flagsRegL() %{ 4012 constraint(ALLOC_IN_RC(int_flags)); 4013 match(RegFlags); 4014 4015 format %{ "xcc_L" %} 4016 interface(REG_INTER); 4017 %} 4018 4019 // Condition Code Register, floating comparisons, unordered same as "less". 4020 operand flagsRegF() %{ 4021 constraint(ALLOC_IN_RC(float_flags)); 4022 match(RegFlags); 4023 match(flagsRegF0); 4024 4025 format %{ %} 4026 interface(REG_INTER); 4027 %} 4028 4029 operand flagsRegF0() %{ 4030 constraint(ALLOC_IN_RC(float_flag0)); 4031 match(RegFlags); 4032 4033 format %{ %} 4034 interface(REG_INTER); 4035 %} 4036 4037 4038 // Condition Code Flag Register used by long compare 4039 operand flagsReg_long_LTGE() %{ 4040 constraint(ALLOC_IN_RC(int_flags)); 4041 match(RegFlags); 4042 format %{ "icc_LTGE" %} 4043 interface(REG_INTER); 4044 %} 4045 operand flagsReg_long_EQNE() %{ 4046 constraint(ALLOC_IN_RC(int_flags)); 4047 match(RegFlags); 4048 format %{ "icc_EQNE" %} 4049 interface(REG_INTER); 4050 %} 4051 operand flagsReg_long_LEGT() %{ 4052 constraint(ALLOC_IN_RC(int_flags)); 4053 match(RegFlags); 4054 format %{ "icc_LEGT" %} 4055 interface(REG_INTER); 4056 %} 4057 4058 4059 operand regD() %{ 4060 constraint(ALLOC_IN_RC(dflt_reg)); 4061 match(RegD); 4062 4063 match(regD_low); 4064 4065 format %{ %} 4066 interface(REG_INTER); 4067 %} 4068 4069 operand regF() %{ 4070 constraint(ALLOC_IN_RC(sflt_reg)); 4071 match(RegF); 4072 4073 format %{ %} 4074 interface(REG_INTER); 4075 %} 4076 4077 operand regD_low() %{ 4078 constraint(ALLOC_IN_RC(dflt_low_reg)); 4079 match(regD); 4080 4081 format %{ %} 4082 interface(REG_INTER); 4083 %} 4084 4085 // Special Registers 4086 4087 // Method Register 4088 operand inline_cache_regP(iRegP reg) %{ 4089 constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1 4090 match(reg); 4091 format %{ %} 4092 interface(REG_INTER); 4093 %} 4094 4095 operand interpreter_method_oop_regP(iRegP reg) %{ 4096 constraint(ALLOC_IN_RC(g5_regP)); // 
G5=interpreter_method_oop_reg but uses 2 bits instead of 1 4097 match(reg); 4098 format %{ %} 4099 interface(REG_INTER); 4100 %} 4101 4102 4103 //----------Complex Operands--------------------------------------------------- 4104 // Indirect Memory Reference 4105 operand indirect(sp_ptr_RegP reg) %{ 4106 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4107 match(reg); 4108 4109 op_cost(100); 4110 format %{ "[$reg]" %} 4111 interface(MEMORY_INTER) %{ 4112 base($reg); 4113 index(0x0); 4114 scale(0x0); 4115 disp(0x0); 4116 %} 4117 %} 4118 4119 // Indirect with simm13 Offset 4120 operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{ 4121 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4122 match(AddP reg offset); 4123 4124 op_cost(100); 4125 format %{ "[$reg + $offset]" %} 4126 interface(MEMORY_INTER) %{ 4127 base($reg); 4128 index(0x0); 4129 scale(0x0); 4130 disp($offset); 4131 %} 4132 %} 4133 4134 // Indirect with simm13 Offset minus 7 4135 operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{ 4136 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4137 match(AddP reg offset); 4138 4139 op_cost(100); 4140 format %{ "[$reg + $offset]" %} 4141 interface(MEMORY_INTER) %{ 4142 base($reg); 4143 index(0x0); 4144 scale(0x0); 4145 disp($offset); 4146 %} 4147 %} 4148 4149 // Note: Intel has a swapped version also, like this: 4150 //operand indOffsetX(iRegI reg, immP offset) %{ 4151 // constraint(ALLOC_IN_RC(int_reg)); 4152 // match(AddP offset reg); 4153 // 4154 // op_cost(100); 4155 // format %{ "[$reg + $offset]" %} 4156 // interface(MEMORY_INTER) %{ 4157 // base($reg); 4158 // index(0x0); 4159 // scale(0x0); 4160 // disp($offset); 4161 // %} 4162 //%} 4163 //// However, it doesn't make sense for SPARC, since 4164 // we have no particularly good way to embed oops in 4165 // single instructions. 
4166 4167 // Indirect with Register Index 4168 operand indIndex(iRegP addr, iRegX index) %{ 4169 constraint(ALLOC_IN_RC(ptr_reg)); 4170 match(AddP addr index); 4171 4172 op_cost(100); 4173 format %{ "[$addr + $index]" %} 4174 interface(MEMORY_INTER) %{ 4175 base($addr); 4176 index($index); 4177 scale(0x0); 4178 disp(0x0); 4179 %} 4180 %} 4181 4182 //----------Special Memory Operands-------------------------------------------- 4183 // Stack Slot Operand - This operand is used for loading and storing temporary 4184 // values on the stack where a match requires a value to 4185 // flow through memory. 4186 operand stackSlotI(sRegI reg) %{ 4187 constraint(ALLOC_IN_RC(stack_slots)); 4188 op_cost(100); 4189 //match(RegI); 4190 format %{ "[$reg]" %} 4191 interface(MEMORY_INTER) %{ 4192 base(0xE); // R_SP 4193 index(0x0); 4194 scale(0x0); 4195 disp($reg); // Stack Offset 4196 %} 4197 %} 4198 4199 operand stackSlotP(sRegP reg) %{ 4200 constraint(ALLOC_IN_RC(stack_slots)); 4201 op_cost(100); 4202 //match(RegP); 4203 format %{ "[$reg]" %} 4204 interface(MEMORY_INTER) %{ 4205 base(0xE); // R_SP 4206 index(0x0); 4207 scale(0x0); 4208 disp($reg); // Stack Offset 4209 %} 4210 %} 4211 4212 operand stackSlotF(sRegF reg) %{ 4213 constraint(ALLOC_IN_RC(stack_slots)); 4214 op_cost(100); 4215 //match(RegF); 4216 format %{ "[$reg]" %} 4217 interface(MEMORY_INTER) %{ 4218 base(0xE); // R_SP 4219 index(0x0); 4220 scale(0x0); 4221 disp($reg); // Stack Offset 4222 %} 4223 %} 4224 operand stackSlotD(sRegD reg) %{ 4225 constraint(ALLOC_IN_RC(stack_slots)); 4226 op_cost(100); 4227 //match(RegD); 4228 format %{ "[$reg]" %} 4229 interface(MEMORY_INTER) %{ 4230 base(0xE); // R_SP 4231 index(0x0); 4232 scale(0x0); 4233 disp($reg); // Stack Offset 4234 %} 4235 %} 4236 operand stackSlotL(sRegL reg) %{ 4237 constraint(ALLOC_IN_RC(stack_slots)); 4238 op_cost(100); 4239 //match(RegL); 4240 format %{ "[$reg]" %} 4241 interface(MEMORY_INTER) %{ 4242 base(0xE); // R_SP 4243 index(0x0); 4244 scale(0x0); 
4245 disp($reg); // Stack Offset 4246 %} 4247 %} 4248 4249 // Operands for expressing Control Flow 4250 // NOTE: Label is a predefined operand which should not be redefined in 4251 // the AD file. It is generically handled within the ADLC. 4252 4253 //----------Conditional Branch Operands---------------------------------------- 4254 // Comparison Op - This is the operation of the comparison, and is limited to 4255 // the following set of codes: 4256 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=) 4257 // 4258 // Other attributes of the comparison, such as unsignedness, are specified 4259 // by the comparison instruction that sets a condition code flags register. 4260 // That result is represented by a flags operand whose subtype is appropriate 4261 // to the unsignedness (etc.) of the comparison. 4262 // 4263 // Later, the instruction which matches both the Comparison Op (a Bool) and 4264 // the flags (produced by the Cmp) specifies the coding of the comparison op 4265 // by matching a specific subtype of Bool operand below, such as cmpOpU. 
// Comparison Op, signed integer.
// The hex values are the raw condition-field bit patterns placed into the
// branch opcodes (per the register-definition header: "The encoding number
// is the actual bit-pattern placed into the opcodes").
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x3);
    greater_equal(0xB);
    less_equal(0x2);
    greater(0xA);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, unsigned
// The predicate rejects overflow/no_overflow Bool tests; only the plain
// signed cmpOp above accepts those.
operand cmpOpU() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "u" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, pointer (same as unsigned)
operand cmpOpP() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "p" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, branch-register encoding
operand cmpOp_reg() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal        (0x1);
    not_equal    (0x5);
    less         (0x3);
    greater_equal(0x7);
    less_equal   (0x2);
    greater      (0x6);
    overflow(0x7);   // not supported
    no_overflow(0xF); // not supported
  %}
%}

// Comparison Code, floating, unordered same as less
operand cmpOpF() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "fl" %}
  interface(COND_INTER) %{
    equal(0x9);
    not_equal(0x1);
    less(0x3);
    greater_equal(0xB);
    less_equal(0xE);
    greater(0x6);

    overflow(0x7);   // not supported
    no_overflow(0xF); // not supported
  %}
%}

// Used by long compare.
// Same table as cmpOp, but with the sense of the asymmetric tests swapped
// (less<->greater, less_equal<->greater_equal) for when the operands of the
// underlying compare were commuted.
operand cmpOp_commute() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0xA);
    greater_equal(0x2);
    less_equal(0xB);
    greater(0x3);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format. The classic
// case of this is memory operands.
// Memory operands usable by most load/store instructions.
opclass memory( indirect, indOffset13, indIndex );
// Register-plus-register addressing only.
opclass indIndexMemory( indIndex );

//----------PIPELINE-----------------------------------------------------------
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;         // Fixed size instructions
  branch_has_delay_slot;           // Branch has delay slot following
  max_instructions_per_bundle = 4; // Up to 4 instructions per bundle
  instruction_unit_size = 4;       // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16; // The processor fetches one line
  instruction_fetch_units = 1;     // of 16 bytes

  // List of nop instructions
  nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine.
// IALU is an alias for "either of the two integer ALUs" (A0 | A1).
resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst  : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R;
%}

// Integer ALU reg-reg long operation (two issued instructions)
pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
    instruction_count(2);
    dst  : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R;
    IALU : R;
%}

// Integer ALU reg-reg long dependent operation
pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst  : E(write);
    src1 : R(read);
    src2 : R(read);
    cr   : E(write);
    IALU : R(2);
%}

// Integer ALU reg-imm operation
pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
    single_instruction;
    dst  : E(write);
    src1 : R(read);
    IALU : R;
%}

// Integer ALU reg-reg operation with condition code
pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
    single_instruction;
    dst  : E(write);
    cr   : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R;
%}

// Integer ALU reg-imm operation with condition code
pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
    single_instruction;
    dst  : E(write);
    cr   : E(write);
    src1 : R(read);
    IALU : R;
%}

// Integer ALU zero-reg operation
pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
    single_instruction;
    dst  : E(write);
    src2 : R(read);
    IALU : R;
%}

// Integer ALU zero-reg operation with condition code only
pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
    single_instruction;
    cr   : E(write);
    src  : R(read);
    IALU : R;
%}

// Integer ALU reg-reg operation with condition code only
pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr   : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R;
%}

// Integer ALU reg-imm operation with condition code only
pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
    single_instruction;
    cr   : E(write);
    src1 : R(read);
    IALU : R;
%}

// Integer ALU reg-reg-zero operation with condition code only
pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
    single_instruction;
    cr   : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R;
%}

// Integer ALU reg-imm-zero operation with condition code only
pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{
    single_instruction;
    cr   : E(write);
    src1 : R(read);
    IALU : R;
%}

// Integer ALU reg-reg operation with condition code, src1 modified
// (src1 appears twice: it is both read and rewritten).
pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr   : E(write);
    src1 : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R;
%}

// Integer ALU reg-imm operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
    single_instruction;
    cr   : E(write);
    src1 : E(write);
    src1 : R(read);
    IALU : R;
%}

// Long compare: multi-bundle sequence producing an int result and flags.
pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
    multiple_bundles;
    dst  : E(write)+4;
    cr   : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R(3);
    BR   : R(2);
%}

// Integer ALU operation
pipe_class ialu_none(iRegI dst) %{
    single_instruction;
    dst  : E(write);
    IALU : R;
%}

// Integer ALU reg operation
pipe_class ialu_reg(iRegI dst, iRegI src) %{
    single_instruction; may_have_no_code;
    dst  : E(write);
    src  : R(read);
    IALU : R;
%}

// Integer ALU reg conditional operation
// This instruction has a 1 cycle stall, and cannot execute
// in the same cycle as the instruction setting the condition
// code. We kludge this by pretending to read the condition code
// 1 cycle earlier, and by marking the functional units as busy
// for 2 cycles with the result available 1 cycle later than
// is really the case.
pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
    single_instruction;
    op2_out : C(write);
    op1     : R(read);
    cr      : R(read); // This is really E, with a 1 cycle stall
    BR      : R(2);
    MS      : R(2);
%}

#ifdef _LP64
pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
    instruction_count(1); multiple_bundles;
    dst  : C(write)+1;
    src  : R(read)+1;
    IALU : R(1);
    BR   : E(2);
    MS   : E(2);
%}
#endif

// Integer ALU reg operation
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
    single_instruction; may_have_no_code;
    dst  : E(write);
    src  : R(read);
    IALU : R;
%}

pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
    single_instruction; may_have_no_code;
    dst  : E(write);
    src  : R(read);
    IALU : R;
%}

// Two integer ALU reg operations
pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
    instruction_count(2);
    dst : E(write);
    src : R(read);
    A0  : R;
    A1  : R;
%}

// Two integer ALU reg operations
pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
    instruction_count(2); may_have_no_code;
    dst : E(write);
    src : R(read);
    A0  : R;
    A1  : R;
%}

// Integer ALU imm operation
pipe_class ialu_imm(iRegI dst, immI13 src) %{
    single_instruction;
    dst  : E(write);
    IALU : R;
%}

// Integer ALU reg-reg with carry operation
pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
    single_instruction;
    dst  : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R;
%}

// Integer ALU cc operation
pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
    single_instruction;
    dst  : E(write);
    cc   : R(read);
    IALU : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
    instruction_count(1); multiple_bundles;
    dst  : E(write)+1;
    src  : R(read);
    IALU : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
    instruction_count(1); multiple_bundles;
    dst  : E(write)+1;
    p    : R(read);
    q    : R(read);
    IALU : R;
%}

// Integer ALU hi-lo-reg operation
pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
    instruction_count(1); multiple_bundles;
    dst  : E(write)+1;
    IALU : R(2);
%}

// Float ALU hi-lo-reg operation (with temp)
pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{
    instruction_count(1); multiple_bundles;
    dst  : E(write)+1;
    IALU : R(2);
%}

// Long Constant
pipe_class loadConL( iRegL dst, immL src ) %{
    instruction_count(2); multiple_bundles;
    dst  : E(write)+1;
    IALU : R(2);
    IALU : R(2);
%}

// Pointer Constant
pipe_class loadConP( iRegP dst, immP src ) %{
    instruction_count(0); multiple_bundles;
    fixed_latency(6);
%}

// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
#ifdef _LP64
    instruction_count(0); multiple_bundles;
    fixed_latency(6);
#else
    dst  : E(write);
    IALU : R;
#endif
%}

// Long Constant small
pipe_class loadConLlo( iRegL dst, immL src ) %{
    instruction_count(2);
    dst  : E(write);
    IALU : R;
    IALU : R;
%}

// [PHH] This is wrong for 64-bit. See LdImmF/D.
pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
    instruction_count(1); multiple_bundles;
    src  : R(read);
    dst  : M(write)+1;
    IALU : R;
    MS   : E;
%}

// Integer ALU nop operation
pipe_class ialu_nop() %{
    single_instruction;
    IALU : R;
%}

// Integer ALU nop operation
pipe_class ialu_nop_A0() %{
    single_instruction;
    A0 : R;
%}

// Integer ALU nop operation
pipe_class ialu_nop_A1() %{
    single_instruction;
    A1 : R;
%}

// Integer Multiply reg-reg operation
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst  : E(write);
    src1 : R(read);
    src2 : R(read);
    MS   : R(5);
%}

// Integer Multiply reg-imm operation
pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
    single_instruction;
    dst  : E(write);
    src1 : R(read);
    MS   : R(5);
%}

// Long multiply reg-reg
pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    single_instruction;
    dst  : E(write)+4;
    src1 : R(read);
    src2 : R(read);
    MS   : R(6);
%}

// Long multiply reg-imm
pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
    single_instruction;
    dst  : E(write)+4;
    src1 : R(read);
    MS   : R(6);
%}

// Integer Divide reg-reg
pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst  : E(write);
    temp : E(write);
    src1 : R(read);
    src2 : R(read);
    temp : R(read);
    MS   : R(38);
%}

// Integer Divide reg-imm
pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst  : E(write);
    temp : E(write);
    src1 : R(read);
    temp : R(read);
    MS   : R(38);
%}

// Long Divide
pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    src2 : R(read)+1;
    MS   : R(70);
%}

// Long Divide, immediate divisor
pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    MS   : R(70);
%}

// Floating Point Add Float
pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FA   : R;
%}

// Floating Point Add Double
pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FA   : R;
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    cr  : R(read);
    FA  : R(2);
    BR  : R(2);
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    cr  : R(read);
    FA  : R(2);
    BR  : R(2);
%}

// Floating Point Multiply Float
pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FM   : R;
%}

// Floating Point Multiply Double
pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FM   : R;
%}

// Floating Point Divide Float
pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FM   : R;
    FDIV : C(14);
%}

// Floating Point Divide Double
pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst  : X(write);
    src1 : E(read);
    src2 : E(read);
    FM   : R;
    FDIV : C(17);
%}

// Floating Point Move/Negate/Abs Float
pipe_class faddF_reg(regF dst, regF src) %{
    single_instruction;
    dst : W(write);
    src : E(read);
    FA  : R(1);
%}

// Floating Point Move/Negate/Abs Double
pipe_class faddD_reg(regD dst, regD src) %{
    single_instruction;
    dst : W(write);
    src : E(read);
    FA  : R;
%}

// Floating Point Convert F->D
pipe_class fcvtF2D(regD dst, regF src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    FA  : R;
%}

// Floating Point Convert I->D
pipe_class fcvtI2D(regD dst, regF src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    FA  : R;
%}

// Floating Point Convert LHi->D
pipe_class fcvtLHi2D(regD dst, regD src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    FA  : R;
%}

// Floating Point Convert L->D
pipe_class fcvtL2D(regD dst, regF src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    FA  : R;
%}

// Floating Point Convert L->F
pipe_class fcvtL2F(regD dst, regF src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    FA  : R;
%}

// Floating Point Convert D->F
pipe_class fcvtD2F(regD dst, regF src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    FA  : R;
%}

// Floating Point Convert I->L
pipe_class fcvtI2L(regD dst, regF src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    FA  : R;
%}

// Floating Point Convert D->I
pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst : X(write)+6;
    src : E(read);
    FA  : R;
%}

// Floating Point Convert D->L
pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst : X(write)+6;
    src : E(read);
    FA  : R;
%}

// Floating Point Convert F->I
pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst : X(write)+6;
    src : E(read);
    FA  : R;
%}

// Floating Point Convert F->L
pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst : X(write)+6;
    src : E(read);
    FA  : R;
%}

// Floating Point Convert I->F
pipe_class fcvtI2F(regF dst, regF src) %{
    single_instruction;
    dst : X(write);
    src : E(read);
    FA  : R;
%}

// Floating Point Compare
pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
    single_instruction;
    cr   : X(write);
    src1 : E(read);
    src2 : E(read);
    FA   : R;
%}

// Floating Point Compare
pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
    single_instruction;
    cr   : X(write);
    src1 : E(read);
    src2 : E(read);
    FA   : R;
%}

// Floating Add Nop
pipe_class fadd_nop() %{
    single_instruction;
    FA : R;
%}

// Integer Store to Memory
pipe_class istore_mem_reg(memory mem, iRegI src) %{
    single_instruction;
    mem : R(read);
    src : C(read);
    MS  : R;
%}

// Integer Store to Memory
pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{
    single_instruction;
    mem : R(read);
    src : C(read);
    MS  : R;
%}

// Integer Store Zero to Memory
pipe_class istore_mem_zero(memory mem, immI0 src) %{
    single_instruction;
    mem : R(read);
    MS  : R;
%}

// Special Stack Slot Store
pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{
    single_instruction;
    stkSlot : R(read);
    src     : C(read);
    MS      : R;
%}

// Special Stack Slot Store
pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{
    instruction_count(2); multiple_bundles;
    stkSlot : R(read);
    src     : C(read);
    MS      : R(2);
%}

// Float Store
pipe_class fstoreF_mem_reg(memory mem, RegF src) %{
    single_instruction;
    mem : R(read);
    src : C(read);
    MS  : R;
%}

// Float Store
pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{
    single_instruction;
    mem : R(read);
    MS  : R;
%}

// Double Store
pipe_class fstoreD_mem_reg(memory mem, RegD src) %{
    instruction_count(1);
    mem : R(read);
    src : C(read);
    MS  : R;
%}

// Double Store
pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{
    single_instruction;
    mem : R(read);
    MS  : R;
%}

// Special Stack Slot Float Store
pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{
    single_instruction;
    stkSlot : R(read);
    src     : C(read);
    MS      : R;
%}

// Special Stack Slot Double Store
pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{
    single_instruction;
    stkSlot : R(read);
    src     : C(read);
    MS      : R;
%}

// Integer Load (when sign bit propagation not needed)
pipe_class iload_mem(iRegI dst, memory mem) %{
    single_instruction;
    mem : R(read);
    dst : C(write);
    MS  : R;
%}

// Integer Load from stack operand
pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{
    single_instruction;
    mem : R(read);
    dst : C(write);
    MS  : R;
%}

// Integer Load (when sign bit propagation or masking is needed)
pipe_class iload_mask_mem(iRegI dst, memory mem) %{
    single_instruction;
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Float Load
pipe_class floadF_mem(regF dst, memory mem) %{
    single_instruction;
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Float Load
pipe_class floadD_mem(regD dst, memory mem) %{
    instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
    mem : R(read);
    dst : M(write);
    MS  : R;
%}

// Float Load
pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{
    single_instruction;
    stkSlot : R(read);
    dst     : M(write);
    MS      : R;
%}

// Float Load
pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{
    single_instruction;
    stkSlot : R(read);
    dst     : M(write);
    MS      : R;
%}

// Memory Nop
pipe_class mem_nop() %{
    single_instruction;
    MS : R;
%}

// Immediate formation into a register via the integer ALU
pipe_class sethi(iRegP dst, immI src) %{
    single_instruction;
    dst  : E(write);
    IALU : R;
%}

// Safepoint-poll load through the polling page pointer
pipe_class loadPollP(iRegP poll) %{
    single_instruction;
    poll : R(read);
    MS   : R;
%}

// Unconditional branch (fills a delay slot)
pipe_class br(Universe br, label labl) %{
    single_instruction_with_delay_slot;
    BR : R;
%}

// Conditional branch on integer condition codes
pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
    single_instruction_with_delay_slot;
    cr : E(read);
    BR : R;
%}

// Branch on register contents
pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
    single_instruction_with_delay_slot;
    op1 : E(read);
    BR  : R;
    MS  : R;
%}

// Compare and branch
pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
    instruction_count(2); has_delay_slot;
    cr   : E(write);
    src1 : R(read);
    src2 : R(read);
    IALU : R;
    BR   : R;
%}

// Compare and branch
pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
    instruction_count(2); has_delay_slot;
    cr   : E(write);
    src1 : R(read);
    IALU : R;
    BR   : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
    single_instruction;
    src1 : E(read);
    src2 : E(read);
    IALU : R;
    BR   : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
    single_instruction;
    src1 : E(read);
    IALU : R;
    BR   : R;
%}

// Conditional branch on float condition codes
pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
    single_instruction_with_delay_slot;
    cr : E(read);
    BR : R;
%}

// Branch-unit nop
pipe_class br_nop() %{
    single_instruction;
    BR : R;
%}

// Call sequences serialize the pipeline and use a fixed latency.
pipe_class simple_call(method meth) %{
    instruction_count(2); multiple_bundles; force_serialization;
    fixed_latency(100);
    BR : R(1);
    MS : R(1);
    A0 : R(1);
%}

pipe_class compiled_call(method meth) %{
    instruction_count(1); multiple_bundles; force_serialization;
    fixed_latency(100);
    MS : R(1);
%}

pipe_class call(method meth) %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(100);
%}

pipe_class tail_call(Universe ignore, label labl) %{
    single_instruction; has_delay_slot;
    fixed_latency(100);
    BR : R(1);
    MS : R(1);
%}

pipe_class ret(Universe ignore) %{
    single_instruction; has_delay_slot;
    BR : R(1);
    MS : R(1);
%}

pipe_class ret_poll(g3RegP poll) %{
    instruction_count(3); has_delay_slot;
    poll : E(read);
    MS   : R;
%}

// The real do-nothing guy
pipe_class empty( ) %{
    instruction_count(0);
%}

pipe_class long_memory_op() %{
    instruction_count(0); multiple_bundles; force_serialization;
    fixed_latency(25);
    MS : R(1);
%}

// Check-cast
pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
    array : R(read);
    match : R(read);
    IALU  : R(2);
    BR    : R(2);
    MS    : R;
%}

// Convert FPU flags into +1,0,-1
pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
    src1 : E(read);
    src2 : E(read);
    dst  : E(write);
    FA   : R;
    MS   : R(2);
    BR   : R(2);
%}

// Compare for p < q, and conditionally add y
pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
    p    : E(read);
    q    : E(read);
    y    : E(read);
    IALU : R(3);   // terminating ';' added for consistency with all other resource usages
%}

// Perform a compare, then move conditionally in a branch delay slot.
pipe_class min_max( iRegI src2, iRegI srcdst ) %{
    src2   : E(read);
    srcdst : E(read);
    IALU   : R;
    BR     : R;
%}

// Define the class for the Nop node
define %{
   MachNop = ialu_nop;
%}

%}

//----------INSTRUCTIONS-------------------------------------------------------

//------------Special Stack Slot instructions - no match rules-----------------
// Load a float from an int stack slot.
instruct stkI_to_regF(regF dst, stackSlotI src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDF    $src,$dst\t! stkI to regF" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadF_stk);
%}

// Load a double from a long stack slot.
instruct stkL_to_regD(regD dst, stackSlotL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF   $src,$dst\t! stkL to regD" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadD_stk);
%}

instruct regF_to_stkI(stackSlotI dst, regF src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STF    $src,$dst\t! regF to stkI" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

// Store a double to a long stack slot.
instruct regD_to_stkL(stackSlotL dst, regD src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF   $src,$dst\t! regD to stkL" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreD_stk_reg);
%}

// Store an int into the hi word of a long stack slot; zero the lo word.
instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST*2);
  size(8);
  format %{ "STW    $src,$dst.hi\t! long\n\t"
            "STW    R_G0,$dst.lo" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
  ins_pipe(lstoreI_stk_reg);
%}

// Store a long register to a double stack slot.
instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX    $src,$dst\t! regL to stkD" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_stk_reg);
%}

//---------- Chain stack slots between similar types --------

// Load integer from stack slot
instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW   $src,$dst\t!stk" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store integer to stack slot
instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW    $src,$dst\t!stk" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

// Load long from stack slot
instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX    $src,$dst\t! long" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store long to stack slot
instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX    $src,$dst\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

#ifdef _LP64
// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX    $src,$dst\t!ptr" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX    $src,$dst\t!ptr" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#else // _LP64
// Load pointer from stack slot, 32-bit encoding
// NOTE(review): unlike the 64-bit variants these have no size(4) — presumably
// intentional; confirm against the matcher's size accounting.
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDUW   $src,$dst\t!ptr" %}
  opcode(Assembler::lduw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STW    $src,$dst\t!ptr" %}
  opcode(Assembler::stw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#endif // _LP64

//------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit
instruct Nop_A0() %{
  ins_cost(0);

  format %{ "NOP ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A0);
%}

// Nop using the A1 functional unit
instruct Nop_A1( ) %{
  ins_cost(0);

  format %{ "NOP ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A1);
%}

// Nop using the memory functional unit
instruct Nop_MS( ) %{
  ins_cost(0);

  format %{ "NOP ! Memory Pipeline" %}
  ins_encode( emit_mem_nop );
  ins_pipe(mem_nop);
%}

// Nop using the floating add functional unit
instruct Nop_FA( ) %{
  ins_cost(0);

  format %{ "NOP ! Floating Add Pipeline" %}
  ins_encode( emit_fadd_nop );
  ins_pipe(fadd_nop);
%}

// Nop using the branch functional unit
instruct Nop_BR( ) %{
  ins_cost(0);

  format %{ "NOP ! Branch Pipeline" %}
  ins_encode( emit_br_nop );
  ins_pipe(br_nop);
%}

//----------Load/Store/Move Instructions---------------------------------------
//----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
instruct loadB(iRegI dst, memory mem) %{
  match(Set dst (LoadB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB   $mem,$dst\t! byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Byte (8bit signed) into a Long Register
instruct loadB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB   $mem,$dst\t! byte -> long" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into an int reg
instruct loadUB(iRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB   $mem,$dst\t! ubyte" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register
instruct loadUB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB   $mem,$dst\t! ubyte -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register
instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUB   $mem,$dst\t# ubyte & 8-bit mask -> long\n\t"
            "AND    $dst,$mask,$dst" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
    __ and3($dst$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Short (16bit signed)
instruct loadS(iRegI dst, memory mem) %{
  match(Set dst (LoadS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSH   $mem,$dst\t! short" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
// Folds the shift pair by loading only the low byte (offset +1).
instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSB   $mem+1,$dst\t! short -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16bit signed) into a Long Register
instruct loadS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSH   $mem,$dst\t! short -> long" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned)
instruct loadUS(iRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH   $mem,$dst\t! ushort/char" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB   $mem+1,$dst\t! ushort -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
instruct loadUS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH   $mem,$dst\t!
ushort/char -> long" %} 5688 ins_encode %{ 5689 __ lduh($mem$$Address, $dst$$Register); 5690 %} 5691 ins_pipe(iload_mem); 5692 %} 5693 5694 // Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register 5695 instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{ 5696 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5697 ins_cost(MEMORY_REF_COST); 5698 5699 size(4); 5700 format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %} 5701 ins_encode %{ 5702 __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE 5703 %} 5704 ins_pipe(iload_mem); 5705 %} 5706 5707 // Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register 5708 instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{ 5709 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5710 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5711 5712 size(2*4); 5713 format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t" 5714 "AND $dst,$mask,$dst" %} 5715 ins_encode %{ 5716 Register Rdst = $dst$$Register; 5717 __ lduh($mem$$Address, Rdst); 5718 __ and3(Rdst, $mask$$constant, Rdst); 5719 %} 5720 ins_pipe(iload_mem); 5721 %} 5722 5723 // Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register 5724 instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{ 5725 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5726 effect(TEMP dst, TEMP tmp); 5727 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); 5728 5729 format %{ "LDUH $mem,$dst\t! 
ushort/char & 16-bit mask -> long\n\t" 5730 "SET $mask,$tmp\n\t" 5731 "AND $dst,$tmp,$dst" %} 5732 ins_encode %{ 5733 Register Rdst = $dst$$Register; 5734 Register Rtmp = $tmp$$Register; 5735 __ lduh($mem$$Address, Rdst); 5736 __ set($mask$$constant, Rtmp); 5737 __ and3(Rdst, Rtmp, Rdst); 5738 %} 5739 ins_pipe(iload_mem); 5740 %} 5741 5742 // Load Integer 5743 instruct loadI(iRegI dst, memory mem) %{ 5744 match(Set dst (LoadI mem)); 5745 ins_cost(MEMORY_REF_COST); 5746 5747 size(4); 5748 format %{ "LDUW $mem,$dst\t! int" %} 5749 ins_encode %{ 5750 __ lduw($mem$$Address, $dst$$Register); 5751 %} 5752 ins_pipe(iload_mem); 5753 %} 5754 5755 // Load Integer to Byte (8 bit signed) 5756 instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5757 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour)); 5758 ins_cost(MEMORY_REF_COST); 5759 5760 size(4); 5761 5762 format %{ "LDSB $mem+3,$dst\t! int -> byte" %} 5763 ins_encode %{ 5764 __ ldsb($mem$$Address, $dst$$Register, 3); 5765 %} 5766 ins_pipe(iload_mask_mem); 5767 %} 5768 5769 // Load Integer to Unsigned Byte (8 bit UNsigned) 5770 instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{ 5771 match(Set dst (AndI (LoadI mem) mask)); 5772 ins_cost(MEMORY_REF_COST); 5773 5774 size(4); 5775 5776 format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %} 5777 ins_encode %{ 5778 __ ldub($mem$$Address, $dst$$Register, 3); 5779 %} 5780 ins_pipe(iload_mask_mem); 5781 %} 5782 5783 // Load Integer to Short (16 bit signed) 5784 instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{ 5785 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen)); 5786 ins_cost(MEMORY_REF_COST); 5787 5788 size(4); 5789 5790 format %{ "LDSH $mem+2,$dst\t! 
int -> short" %} 5791 ins_encode %{ 5792 __ ldsh($mem$$Address, $dst$$Register, 2); 5793 %} 5794 ins_pipe(iload_mask_mem); 5795 %} 5796 5797 // Load Integer to Unsigned Short (16 bit UNsigned) 5798 instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{ 5799 match(Set dst (AndI (LoadI mem) mask)); 5800 ins_cost(MEMORY_REF_COST); 5801 5802 size(4); 5803 5804 format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %} 5805 ins_encode %{ 5806 __ lduh($mem$$Address, $dst$$Register, 2); 5807 %} 5808 ins_pipe(iload_mask_mem); 5809 %} 5810 5811 // Load Integer into a Long Register 5812 instruct loadI2L(iRegL dst, memory mem) %{ 5813 match(Set dst (ConvI2L (LoadI mem))); 5814 ins_cost(MEMORY_REF_COST); 5815 5816 size(4); 5817 format %{ "LDSW $mem,$dst\t! int -> long" %} 5818 ins_encode %{ 5819 __ ldsw($mem$$Address, $dst$$Register); 5820 %} 5821 ins_pipe(iload_mask_mem); 5822 %} 5823 5824 // Load Integer with mask 0xFF into a Long Register 5825 instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{ 5826 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5827 ins_cost(MEMORY_REF_COST); 5828 5829 size(4); 5830 format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %} 5831 ins_encode %{ 5832 __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE 5833 %} 5834 ins_pipe(iload_mem); 5835 %} 5836 5837 // Load Integer with mask 0xFFFF into a Long Register 5838 instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{ 5839 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5840 ins_cost(MEMORY_REF_COST); 5841 5842 size(4); 5843 format %{ "LDUH $mem+2,$dst\t! 
int & 0xFFFF -> long" %} 5844 ins_encode %{ 5845 __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE 5846 %} 5847 ins_pipe(iload_mem); 5848 %} 5849 5850 // Load Integer with a 13-bit mask into a Long Register 5851 instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{ 5852 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5853 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5854 5855 size(2*4); 5856 format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t" 5857 "AND $dst,$mask,$dst" %} 5858 ins_encode %{ 5859 Register Rdst = $dst$$Register; 5860 __ lduw($mem$$Address, Rdst); 5861 __ and3(Rdst, $mask$$constant, Rdst); 5862 %} 5863 ins_pipe(iload_mem); 5864 %} 5865 5866 // Load Integer with a 32-bit mask into a Long Register 5867 instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{ 5868 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5869 effect(TEMP dst, TEMP tmp); 5870 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); 5871 5872 format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t" 5873 "SET $mask,$tmp\n\t" 5874 "AND $dst,$tmp,$dst" %} 5875 ins_encode %{ 5876 Register Rdst = $dst$$Register; 5877 Register Rtmp = $tmp$$Register; 5878 __ lduw($mem$$Address, Rdst); 5879 __ set($mask$$constant, Rtmp); 5880 __ and3(Rdst, Rtmp, Rdst); 5881 %} 5882 ins_pipe(iload_mem); 5883 %} 5884 5885 // Load Unsigned Integer into a Long Register 5886 instruct loadUI2L(iRegL dst, memory mem, immL_32bits mask) %{ 5887 match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); 5888 ins_cost(MEMORY_REF_COST); 5889 5890 size(4); 5891 format %{ "LDUW $mem,$dst\t! uint -> long" %} 5892 ins_encode %{ 5893 __ lduw($mem$$Address, $dst$$Register); 5894 %} 5895 ins_pipe(iload_mem); 5896 %} 5897 5898 // Load Long - aligned 5899 instruct loadL(iRegL dst, memory mem ) %{ 5900 match(Set dst (LoadL mem)); 5901 ins_cost(MEMORY_REF_COST); 5902 5903 size(4); 5904 format %{ "LDX $mem,$dst\t! 
long" %} 5905 ins_encode %{ 5906 __ ldx($mem$$Address, $dst$$Register); 5907 %} 5908 ins_pipe(iload_mem); 5909 %} 5910 5911 // Load Long - UNaligned 5912 instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{ 5913 match(Set dst (LoadL_unaligned mem)); 5914 effect(KILL tmp); 5915 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST); 5916 size(16); 5917 format %{ "LDUW $mem+4,R_O7\t! misaligned long\n" 5918 "\tLDUW $mem ,$dst\n" 5919 "\tSLLX #32, $dst, $dst\n" 5920 "\tOR $dst, R_O7, $dst" %} 5921 opcode(Assembler::lduw_op3); 5922 ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst )); 5923 ins_pipe(iload_mem); 5924 %} 5925 5926 // Load Range 5927 instruct loadRange(iRegI dst, memory mem) %{ 5928 match(Set dst (LoadRange mem)); 5929 ins_cost(MEMORY_REF_COST); 5930 5931 size(4); 5932 format %{ "LDUW $mem,$dst\t! range" %} 5933 opcode(Assembler::lduw_op3); 5934 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5935 ins_pipe(iload_mem); 5936 %} 5937 5938 // Load Integer into %f register (for fitos/fitod) 5939 instruct loadI_freg(regF dst, memory mem) %{ 5940 match(Set dst (LoadI mem)); 5941 ins_cost(MEMORY_REF_COST); 5942 size(4); 5943 5944 format %{ "LDF $mem,$dst\t! for fitos/fitod" %} 5945 opcode(Assembler::ldf_op3); 5946 ins_encode(simple_form3_mem_reg( mem, dst ) ); 5947 ins_pipe(floadF_mem); 5948 %} 5949 5950 // Load Pointer 5951 instruct loadP(iRegP dst, memory mem) %{ 5952 match(Set dst (LoadP mem)); 5953 ins_cost(MEMORY_REF_COST); 5954 size(4); 5955 5956 #ifndef _LP64 5957 format %{ "LDUW $mem,$dst\t! ptr" %} 5958 ins_encode %{ 5959 __ lduw($mem$$Address, $dst$$Register); 5960 %} 5961 #else 5962 format %{ "LDX $mem,$dst\t! ptr" %} 5963 ins_encode %{ 5964 __ ldx($mem$$Address, $dst$$Register); 5965 %} 5966 #endif 5967 ins_pipe(iload_mem); 5968 %} 5969 5970 // Load Compressed Pointer 5971 instruct loadN(iRegN dst, memory mem) %{ 5972 match(Set dst (LoadN mem)); 5973 ins_cost(MEMORY_REF_COST); 5974 size(4); 5975 5976 format %{ "LDUW $mem,$dst\t! 
compressed ptr" %} 5977 ins_encode %{ 5978 __ lduw($mem$$Address, $dst$$Register); 5979 %} 5980 ins_pipe(iload_mem); 5981 %} 5982 5983 // Load Klass Pointer 5984 instruct loadKlass(iRegP dst, memory mem) %{ 5985 match(Set dst (LoadKlass mem)); 5986 ins_cost(MEMORY_REF_COST); 5987 size(4); 5988 5989 #ifndef _LP64 5990 format %{ "LDUW $mem,$dst\t! klass ptr" %} 5991 ins_encode %{ 5992 __ lduw($mem$$Address, $dst$$Register); 5993 %} 5994 #else 5995 format %{ "LDX $mem,$dst\t! klass ptr" %} 5996 ins_encode %{ 5997 __ ldx($mem$$Address, $dst$$Register); 5998 %} 5999 #endif 6000 ins_pipe(iload_mem); 6001 %} 6002 6003 // Load narrow Klass Pointer 6004 instruct loadNKlass(iRegN dst, memory mem) %{ 6005 match(Set dst (LoadNKlass mem)); 6006 ins_cost(MEMORY_REF_COST); 6007 size(4); 6008 6009 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %} 6010 ins_encode %{ 6011 __ lduw($mem$$Address, $dst$$Register); 6012 %} 6013 ins_pipe(iload_mem); 6014 %} 6015 6016 // Load Double 6017 instruct loadD(regD dst, memory mem) %{ 6018 match(Set dst (LoadD mem)); 6019 ins_cost(MEMORY_REF_COST); 6020 6021 size(4); 6022 format %{ "LDDF $mem,$dst" %} 6023 opcode(Assembler::lddf_op3); 6024 ins_encode(simple_form3_mem_reg( mem, dst ) ); 6025 ins_pipe(floadD_mem); 6026 %} 6027 6028 // Load Double - UNaligned 6029 instruct loadD_unaligned(regD_low dst, memory mem ) %{ 6030 match(Set dst (LoadD_unaligned mem)); 6031 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST); 6032 size(8); 6033 format %{ "LDF $mem ,$dst.hi\t! misaligned double\n" 6034 "\tLDF $mem+4,$dst.lo\t!" 
%}
  opcode(Assembler::ldf_op3);
  ins_encode( form3_mem_reg_double_unaligned( mem, dst ));
  ins_pipe(iload_mem);
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDF $mem,$dst" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadF_mem);
%}

// Load Constant
instruct loadConI( iRegI dst, immI src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst" %}
  ins_encode( Set32(src, dst) );
  ins_pipe(ialu_hi_lo_reg);
%}

instruct loadConI13( iRegI dst, immI13 src ) %{
  match(Set dst src);

  size(4);
  format %{ "MOV $src,$dst" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

#ifndef _LP64
instruct loadConP(iRegP dst, immP con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t!ptr" %}
  ins_encode %{
    // Pick the relocation kind that matches the constant's type.
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $con$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ set_oop_constant((jobject) val, $dst$$Register);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)val, $dst$$Register);
    } else {          // non-oop pointers, e.g. card mark base, heap top
      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
      __ set(val, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}
#else
instruct loadConP_set(iRegP dst, immP_set con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t! ptr" %}
  ins_encode %{
    // Pick the relocation kind that matches the constant's type.
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $con$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ set_oop_constant((jobject) val, $dst$$Register);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)val, $dst$$Register);
    } else {          // non-oop pointers, e.g. card mark base, heap top
      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
      __ set(val, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}

instruct loadConP_load(iRegP dst, immP_load con) %{
  match(Set dst con);
  ins_cost(MEMORY_REF_COST);
  format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
    __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
  %}
  ins_pipe(loadConP);
%}

instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t! non-oop ptr" %}
  ins_encode %{
    __ set($con$$constant, $dst$$Register);
  %}
  ins_pipe(loadConP);
%}
#endif // _LP64

instruct loadConP0(iRegP dst, immP0 src) %{
  match(Set dst src);

  size(4);
  format %{ "CLR $dst\t!ptr" %}
  ins_encode %{
    __ clr($dst$$Register);
  %}
  ins_pipe(ialu_imm);
%}

instruct loadConP_poll(iRegP dst, immP_poll src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  format %{ "SET $src,$dst\t!ptr" %}
  ins_encode %{
    // NOTE(review): only SETHI is emitted here, so the low bits of the
    // polling page address are assumed zero -- confirm page alignment.
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, reg_to_register_object($dst$$reg));
  %}
  ins_pipe(loadConP_poll);
%}

instruct loadConN0(iRegN dst, immN0 src) %{
  match(Set dst src);

  size(4);
  format %{ "CLR $dst\t! compressed NULL ptr" %}
  ins_encode %{
    __ clr($dst$$Register);
  %}
  ins_pipe(ialu_imm);
%}

instruct loadConN(iRegN dst, immN src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop((jobject)$src$$constant, dst);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

instruct loadConNKlass(iRegN dst, immNKlass src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst\t! compressed klass ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass((Klass*)$src$$constant, dst);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

// Materialize long value (predicated by immL_cheap).
instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  ins_cost(DEFAULT_COST * 3);
  format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
  ins_encode %{
    __ set64($con$$constant, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(loadConL);
%}

// Load long value from constant table (predicated by immL_expensive).
instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
  match(Set dst con);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
    __ ldx($constanttablebase, con_offset, $dst$$Register);
  %}
  ins_pipe(loadConL);
%}

instruct loadConL0( iRegL dst, immL0 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "CLR $dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

instruct loadConL13( iRegL dst, immL13 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);

  size(4);
  format %{ "MOV $src,$dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

instruct loadConF(regF dst, immF con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
  %}
  ins_pipe(loadConFD);
%}

instruct loadConD(regD dst, immD con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchr( memory mem ) %{
  match( PrefetchRead mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "PREFETCH $mem,0\t! Prefetch read-many" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_read( mem ) );
  ins_pipe(iload_mem);
%}

instruct prefetchw( memory mem ) %{
  match( PrefetchWrite mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_write( mem ) );
  ins_pipe(iload_mem);
%}

// Prefetch instructions for allocation.

instruct prefetchAlloc( memory mem ) %{
  predicate(AllocatePrefetchInstr == 0);
  match( PrefetchAllocation mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_write( mem ) );
  ins_pipe(iload_mem);
%}

// Use BIS instruction to prefetch for allocation.
// Could fault, need space at the end of TLAB.
instruct prefetchAlloc_bis( iRegP dst ) %{
  predicate(AllocatePrefetchInstr == 1);
  match( PrefetchAllocation dst );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
  ins_encode %{
    // Store G0 through the block-init store ASI
    __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  %}
  ins_pipe(istore_mem_reg);
%}

// Next code is used for finding next cache line address to prefetch.
#ifndef _LP64
instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
  match(Set dst (CastX2P (AndI (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "AND $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
  match(Set dst (CastX2P (AndL (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "AND $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}
#endif

//----------Store Instructions-------------------------------------------------
// Store Byte
instruct storeB(memory mem, iRegI src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

instruct storeB0(memory mem, immI0 src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

instruct storeCM0(memory mem, immI0 src) %{
  match(Set mem (StoreCM mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Char/Short
instruct storeC(memory mem, iRegI src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

instruct storeC0(memory mem, immI0 src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Integer
instruct storeI(memory mem, iRegI src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store Long
instruct storeL(memory mem, iRegL src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$mem\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

instruct storeI0(memory mem, immI0 src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

instruct storeL0(memory mem, immL0 src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Integer from float register (used after fstoi)
instruct storeI_Freg(memory mem, regF src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

// Store Pointer
instruct storeP(memory dst, sp_ptr_RegP src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_spORreg);
%}

instruct storeP0(memory dst, immP0 src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Compressed Pointer
instruct storeN(memory dst, iRegN src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    // reg+reg form when an index register is present, else reg+disp
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

instruct storeNKlass(memory dst, iRegN src) %{
  match(Set dst (StoreNKlass dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed klass ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    // reg+reg form when an index register is present, else reg+disp
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

instruct storeN0(memory dst, immN0 src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    // reg+reg form when an index register is present, else reg+disp
    if (index != G0) {
      __ stw(0, base, index);
    } else {
      __ stw(0, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_zero);
%}

// Store Double
instruct storeD( memory mem, regD src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STDF $src,$mem" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreD_mem_reg);
%}

instruct storeD0( memory mem, immD0 src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreD_mem_zero);
%}

// Store Float
instruct storeF( memory mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STF $src,$mem" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

instruct storeF0( memory mem, immF0 src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem\t! storeF0" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreF_mem_zero);
%}

// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ encode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ decode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors

instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-acquire" %}
  ins_encode( enc_membar_acquire );
  ins_pipe(long_memory_op);
%}

instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-release" %}
  ins_encode( enc_membar_release );
  ins_pipe(long_memory_op);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-volatile" %}
  ins_encode( enc_membar_volatile );
  ins_pipe(long_memory_op);
%}

instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-storestore (empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

//----------Register Move Instructions-----------------------------------------
instruct roundDouble_nop(regD dst) %{
  match(Set dst (RoundDouble dst));
  ins_cost(0);
  // SPARC results are already "rounded" (i.e., normal-format IEEE)
  ins_encode( );
  ins_pipe(empty);
%}


instruct roundFloat_nop(regF dst) %{
  match(Set dst (RoundFloat dst));
  ins_cost(0);
  // SPARC results are already "rounded" (i.e., normal-format IEEE)
  ins_encode( );
  ins_pipe(empty);
%}


// Cast Index to Pointer for unsafe natives
instruct castX2P(iRegX src, iRegP dst) %{
  match(Set dst (CastX2P src));

  format %{ "MOV $src,$dst\t! IntX->Ptr" %}
  ins_encode( form3_g0_rs2_rd_move( src, dst ) );
  ins_pipe(ialu_reg);
%}

// Cast Pointer to Index for unsafe natives
instruct castP2X(iRegP src, iRegX dst) %{
  match(Set dst (CastP2X src));

  format %{ "MOV $src,$dst\t! Ptr->IntX" %}
  ins_encode( form3_g0_rs2_rd_move( src, dst ) );
  ins_pipe(ialu_reg);
%}

instruct stfSSD(stackSlotD stkSlot, regD src) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
  match(Set stkSlot src); // chain rule
  ins_cost(MEMORY_REF_COST);
  format %{ "STDF $src,$stkSlot\t!stk" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(stkSlot, src));
  ins_pipe(fstoreD_stk_reg);
%}

instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
  // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6769 match(Set dst stkSlot); // chain rule 6770 ins_cost(MEMORY_REF_COST); 6771 format %{ "LDDF $stkSlot,$dst\t!stk" %} 6772 opcode(Assembler::lddf_op3); 6773 ins_encode(simple_form3_mem_reg(stkSlot, dst)); 6774 ins_pipe(floadD_stk); 6775 %} 6776 6777 instruct stfSSF(stackSlotF stkSlot, regF src) %{ 6778 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 6779 match(Set stkSlot src); // chain rule 6780 ins_cost(MEMORY_REF_COST); 6781 format %{ "STF $src,$stkSlot\t!stk" %} 6782 opcode(Assembler::stf_op3); 6783 ins_encode(simple_form3_mem_reg(stkSlot, src)); 6784 ins_pipe(fstoreF_stk_reg); 6785 %} 6786 6787 //----------Conditional Move--------------------------------------------------- 6788 // Conditional move 6789 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{ 6790 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6791 ins_cost(150); 6792 format %{ "MOV$cmp $pcc,$src,$dst" %} 6793 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6794 ins_pipe(ialu_reg); 6795 %} 6796 6797 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{ 6798 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6799 ins_cost(140); 6800 format %{ "MOV$cmp $pcc,$src,$dst" %} 6801 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6802 ins_pipe(ialu_imm); 6803 %} 6804 6805 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{ 6806 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6807 ins_cost(150); 6808 size(4); 6809 format %{ "MOV$cmp $icc,$src,$dst" %} 6810 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6811 ins_pipe(ialu_reg); 6812 %} 6813 6814 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{ 6815 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6816 ins_cost(140); 6817 size(4); 6818 format %{ "MOV$cmp $icc,$src,$dst" %} 6819 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6820 ins_pipe(ialu_imm); 6821 %} 6822 6823 
instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{ 6824 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6825 ins_cost(150); 6826 size(4); 6827 format %{ "MOV$cmp $icc,$src,$dst" %} 6828 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6829 ins_pipe(ialu_reg); 6830 %} 6831 6832 instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{ 6833 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6834 ins_cost(140); 6835 size(4); 6836 format %{ "MOV$cmp $icc,$src,$dst" %} 6837 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6838 ins_pipe(ialu_imm); 6839 %} 6840 6841 instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{ 6842 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src))); 6843 ins_cost(150); 6844 size(4); 6845 format %{ "MOV$cmp $fcc,$src,$dst" %} 6846 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6847 ins_pipe(ialu_reg); 6848 %} 6849 6850 instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{ 6851 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src))); 6852 ins_cost(140); 6853 size(4); 6854 format %{ "MOV$cmp $fcc,$src,$dst" %} 6855 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) ); 6856 ins_pipe(ialu_imm); 6857 %} 6858 6859 // Conditional move for RegN. Only cmov(reg,reg). 6860 instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{ 6861 match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src))); 6862 ins_cost(150); 6863 format %{ "MOV$cmp $pcc,$src,$dst" %} 6864 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6865 ins_pipe(ialu_reg); 6866 %} 6867 6868 // This instruction also works with CmpN so we don't need cmovNN_reg. 
6869 instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{ 6870 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); 6871 ins_cost(150); 6872 size(4); 6873 format %{ "MOV$cmp $icc,$src,$dst" %} 6874 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6875 ins_pipe(ialu_reg); 6876 %} 6877 6878 // This instruction also works with CmpN so we don't need cmovNN_reg. 6879 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{ 6880 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); 6881 ins_cost(150); 6882 size(4); 6883 format %{ "MOV$cmp $icc,$src,$dst" %} 6884 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6885 ins_pipe(ialu_reg); 6886 %} 6887 6888 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{ 6889 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src))); 6890 ins_cost(150); 6891 size(4); 6892 format %{ "MOV$cmp $fcc,$src,$dst" %} 6893 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6894 ins_pipe(ialu_reg); 6895 %} 6896 6897 // Conditional move 6898 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{ 6899 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6900 ins_cost(150); 6901 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6902 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6903 ins_pipe(ialu_reg); 6904 %} 6905 6906 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{ 6907 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6908 ins_cost(140); 6909 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6910 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6911 ins_pipe(ialu_imm); 6912 %} 6913 6914 // This instruction also works with CmpN so we don't need cmovPN_reg. 6915 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{ 6916 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6917 ins_cost(150); 6918 6919 size(4); 6920 format %{ "MOV$cmp $icc,$src,$dst\t! 
ptr" %} 6921 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6922 ins_pipe(ialu_reg); 6923 %} 6924 6925 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{ 6926 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6927 ins_cost(150); 6928 6929 size(4); 6930 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 6931 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6932 ins_pipe(ialu_reg); 6933 %} 6934 6935 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{ 6936 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6937 ins_cost(140); 6938 6939 size(4); 6940 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 6941 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6942 ins_pipe(ialu_imm); 6943 %} 6944 6945 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{ 6946 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6947 ins_cost(140); 6948 6949 size(4); 6950 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 6951 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6952 ins_pipe(ialu_imm); 6953 %} 6954 6955 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{ 6956 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src))); 6957 ins_cost(150); 6958 size(4); 6959 format %{ "MOV$cmp $fcc,$src,$dst" %} 6960 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6961 ins_pipe(ialu_imm); 6962 %} 6963 6964 instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{ 6965 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src))); 6966 ins_cost(140); 6967 size(4); 6968 format %{ "MOV$cmp $fcc,$src,$dst" %} 6969 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) ); 6970 ins_pipe(ialu_imm); 6971 %} 6972 6973 // Conditional move 6974 instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{ 6975 match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src))); 6976 ins_cost(150); 6977 opcode(0x101); 6978 format %{ "FMOVD$cmp $pcc,$src,$dst" %} 6979 ins_encode( 
enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6980 ins_pipe(int_conditional_float_move); 6981 %} 6982 6983 instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{ 6984 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src))); 6985 ins_cost(150); 6986 6987 size(4); 6988 format %{ "FMOVS$cmp $icc,$src,$dst" %} 6989 opcode(0x101); 6990 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 6991 ins_pipe(int_conditional_float_move); 6992 %} 6993 6994 instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{ 6995 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src))); 6996 ins_cost(150); 6997 6998 size(4); 6999 format %{ "FMOVS$cmp $icc,$src,$dst" %} 7000 opcode(0x101); 7001 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7002 ins_pipe(int_conditional_float_move); 7003 %} 7004 7005 // Conditional move, 7006 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{ 7007 match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src))); 7008 ins_cost(150); 7009 size(4); 7010 format %{ "FMOVF$cmp $fcc,$src,$dst" %} 7011 opcode(0x1); 7012 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) ); 7013 ins_pipe(int_conditional_double_move); 7014 %} 7015 7016 // Conditional move 7017 instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{ 7018 match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src))); 7019 ins_cost(150); 7020 size(4); 7021 opcode(0x102); 7022 format %{ "FMOVD$cmp $pcc,$src,$dst" %} 7023 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 7024 ins_pipe(int_conditional_double_move); 7025 %} 7026 7027 instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{ 7028 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src))); 7029 ins_cost(150); 7030 7031 size(4); 7032 format %{ "FMOVD$cmp $icc,$src,$dst" %} 7033 opcode(0x102); 7034 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7035 ins_pipe(int_conditional_double_move); 7036 %} 7037 7038 instruct cmovDIu_reg(cmpOpU cmp, 
flagsRegU icc, regD dst, regD src) %{ 7039 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src))); 7040 ins_cost(150); 7041 7042 size(4); 7043 format %{ "FMOVD$cmp $icc,$src,$dst" %} 7044 opcode(0x102); 7045 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7046 ins_pipe(int_conditional_double_move); 7047 %} 7048 7049 // Conditional move, 7050 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{ 7051 match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src))); 7052 ins_cost(150); 7053 size(4); 7054 format %{ "FMOVD$cmp $fcc,$src,$dst" %} 7055 opcode(0x2); 7056 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) ); 7057 ins_pipe(int_conditional_double_move); 7058 %} 7059 7060 // Conditional move 7061 instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{ 7062 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src))); 7063 ins_cost(150); 7064 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %} 7065 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 7066 ins_pipe(ialu_reg); 7067 %} 7068 7069 instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{ 7070 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src))); 7071 ins_cost(140); 7072 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %} 7073 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 7074 ins_pipe(ialu_imm); 7075 %} 7076 7077 instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{ 7078 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src))); 7079 ins_cost(150); 7080 7081 size(4); 7082 format %{ "MOV$cmp $icc,$src,$dst\t! long" %} 7083 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 7084 ins_pipe(ialu_reg); 7085 %} 7086 7087 7088 instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{ 7089 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src))); 7090 ins_cost(150); 7091 7092 size(4); 7093 format %{ "MOV$cmp $icc,$src,$dst\t! 
long" %} 7094 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 7095 ins_pipe(ialu_reg); 7096 %} 7097 7098 7099 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{ 7100 match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src))); 7101 ins_cost(150); 7102 7103 size(4); 7104 format %{ "MOV$cmp $fcc,$src,$dst\t! long" %} 7105 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 7106 ins_pipe(ialu_reg); 7107 %} 7108 7109 7110 7111 //----------OS and Locking Instructions---------------------------------------- 7112 7113 // This name is KNOWN by the ADLC and cannot be changed. 7114 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type 7115 // for this guy. 7116 instruct tlsLoadP(g2RegP dst) %{ 7117 match(Set dst (ThreadLocal)); 7118 7119 size(0); 7120 ins_cost(0); 7121 format %{ "# TLS is in G2" %} 7122 ins_encode( /*empty encoding*/ ); 7123 ins_pipe(ialu_none); 7124 %} 7125 7126 instruct checkCastPP( iRegP dst ) %{ 7127 match(Set dst (CheckCastPP dst)); 7128 7129 size(0); 7130 format %{ "# checkcastPP of $dst" %} 7131 ins_encode( /*empty encoding*/ ); 7132 ins_pipe(empty); 7133 %} 7134 7135 7136 instruct castPP( iRegP dst ) %{ 7137 match(Set dst (CastPP dst)); 7138 format %{ "# castPP of $dst" %} 7139 ins_encode( /*empty encoding*/ ); 7140 ins_pipe(empty); 7141 %} 7142 7143 instruct castII( iRegI dst ) %{ 7144 match(Set dst (CastII dst)); 7145 format %{ "# castII of $dst" %} 7146 ins_encode( /*empty encoding*/ ); 7147 ins_cost(0); 7148 ins_pipe(empty); 7149 %} 7150 7151 //----------Arithmetic Instructions-------------------------------------------- 7152 // Addition Instructions 7153 // Register Addition 7154 instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7155 match(Set dst (AddI src1 src2)); 7156 7157 size(4); 7158 format %{ "ADD $src1,$src2,$dst" %} 7159 ins_encode %{ 7160 __ add($src1$$Register, $src2$$Register, $dst$$Register); 7161 %} 7162 ins_pipe(ialu_reg_reg); 7163 %} 7164 7165 // Immediate Addition 7166 instruct 
addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7167 match(Set dst (AddI src1 src2)); 7168 7169 size(4); 7170 format %{ "ADD $src1,$src2,$dst" %} 7171 opcode(Assembler::add_op3, Assembler::arith_op); 7172 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7173 ins_pipe(ialu_reg_imm); 7174 %} 7175 7176 // Pointer Register Addition 7177 instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{ 7178 match(Set dst (AddP src1 src2)); 7179 7180 size(4); 7181 format %{ "ADD $src1,$src2,$dst" %} 7182 opcode(Assembler::add_op3, Assembler::arith_op); 7183 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7184 ins_pipe(ialu_reg_reg); 7185 %} 7186 7187 // Pointer Immediate Addition 7188 instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{ 7189 match(Set dst (AddP src1 src2)); 7190 7191 size(4); 7192 format %{ "ADD $src1,$src2,$dst" %} 7193 opcode(Assembler::add_op3, Assembler::arith_op); 7194 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7195 ins_pipe(ialu_reg_imm); 7196 %} 7197 7198 // Long Addition 7199 instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7200 match(Set dst (AddL src1 src2)); 7201 7202 size(4); 7203 format %{ "ADD $src1,$src2,$dst\t! long" %} 7204 opcode(Assembler::add_op3, Assembler::arith_op); 7205 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7206 ins_pipe(ialu_reg_reg); 7207 %} 7208 7209 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7210 match(Set dst (AddL src1 con)); 7211 7212 size(4); 7213 format %{ "ADD $src1,$con,$dst" %} 7214 opcode(Assembler::add_op3, Assembler::arith_op); 7215 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7216 ins_pipe(ialu_reg_imm); 7217 %} 7218 7219 //----------Conditional_store-------------------------------------------------- 7220 // Conditional-store of the updated heap-top. 7221 // Used during allocation of the shared heap. 7222 // Sets flags (EQ) on success. Implemented with a CASA on Sparc. 7223 7224 // LoadP-locked. 
Same as a regular pointer load when used with a compare-swap 7225 instruct loadPLocked(iRegP dst, memory mem) %{ 7226 match(Set dst (LoadPLocked mem)); 7227 ins_cost(MEMORY_REF_COST); 7228 7229 #ifndef _LP64 7230 size(4); 7231 format %{ "LDUW $mem,$dst\t! ptr" %} 7232 opcode(Assembler::lduw_op3, 0, REGP_OP); 7233 #else 7234 format %{ "LDX $mem,$dst\t! ptr" %} 7235 opcode(Assembler::ldx_op3, 0, REGP_OP); 7236 #endif 7237 ins_encode( form3_mem_reg( mem, dst ) ); 7238 ins_pipe(iload_mem); 7239 %} 7240 7241 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{ 7242 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval))); 7243 effect( KILL newval ); 7244 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t" 7245 "CMP R_G3,$oldval\t\t! See if we made progress" %} 7246 ins_encode( enc_cas(heap_top_ptr,oldval,newval) ); 7247 ins_pipe( long_memory_op ); 7248 %} 7249 7250 // Conditional-store of an int value. 7251 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{ 7252 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval))); 7253 effect( KILL newval ); 7254 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" 7255 "CMP $oldval,$newval\t\t! See if we made progress" %} 7256 ins_encode( enc_cas(mem_ptr,oldval,newval) ); 7257 ins_pipe( long_memory_op ); 7258 %} 7259 7260 // Conditional-store of a long value. 7261 instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{ 7262 match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval))); 7263 effect( KILL newval ); 7264 format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" 7265 "CMP $oldval,$newval\t\t! 
See if we made progress" %} 7266 ins_encode( enc_cas(mem_ptr,oldval,newval) ); 7267 ins_pipe( long_memory_op ); 7268 %} 7269 7270 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them 7271 7272 instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7273 predicate(VM_Version::supports_cx8()); 7274 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval))); 7275 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7276 format %{ 7277 "MOV $newval,O7\n\t" 7278 "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7279 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7280 "MOV 1,$res\n\t" 7281 "MOVne xcc,R_G0,$res" 7282 %} 7283 ins_encode( enc_casx(mem_ptr, oldval, newval), 7284 enc_lflags_ne_to_boolean(res) ); 7285 ins_pipe( long_memory_op ); 7286 %} 7287 7288 7289 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7290 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval))); 7291 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7292 format %{ 7293 "MOV $newval,O7\n\t" 7294 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7295 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7296 "MOV 1,$res\n\t" 7297 "MOVne icc,R_G0,$res" 7298 %} 7299 ins_encode( enc_casi(mem_ptr, oldval, newval), 7300 enc_iflags_ne_to_boolean(res) ); 7301 ins_pipe( long_memory_op ); 7302 %} 7303 7304 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7305 #ifdef _LP64 7306 predicate(VM_Version::supports_cx8()); 7307 #endif 7308 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); 7309 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7310 format %{ 7311 "MOV $newval,O7\n\t" 7312 "CASA_PTR [$mem_ptr],$oldval,O7\t! 
If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7313 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7314 "MOV 1,$res\n\t" 7315 "MOVne xcc,R_G0,$res" 7316 %} 7317 #ifdef _LP64 7318 ins_encode( enc_casx(mem_ptr, oldval, newval), 7319 enc_lflags_ne_to_boolean(res) ); 7320 #else 7321 ins_encode( enc_casi(mem_ptr, oldval, newval), 7322 enc_iflags_ne_to_boolean(res) ); 7323 #endif 7324 ins_pipe( long_memory_op ); 7325 %} 7326 7327 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7328 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); 7329 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7330 format %{ 7331 "MOV $newval,O7\n\t" 7332 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7333 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7334 "MOV 1,$res\n\t" 7335 "MOVne icc,R_G0,$res" 7336 %} 7337 ins_encode( enc_casi(mem_ptr, oldval, newval), 7338 enc_iflags_ne_to_boolean(res) ); 7339 ins_pipe( long_memory_op ); 7340 %} 7341 7342 instruct xchgI( memory mem, iRegI newval) %{ 7343 match(Set newval (GetAndSetI mem newval)); 7344 format %{ "SWAP [$mem],$newval" %} 7345 size(4); 7346 ins_encode %{ 7347 __ swap($mem$$Address, $newval$$Register); 7348 %} 7349 ins_pipe( long_memory_op ); 7350 %} 7351 7352 #ifndef _LP64 7353 instruct xchgP( memory mem, iRegP newval) %{ 7354 match(Set newval (GetAndSetP mem newval)); 7355 format %{ "SWAP [$mem],$newval" %} 7356 size(4); 7357 ins_encode %{ 7358 __ swap($mem$$Address, $newval$$Register); 7359 %} 7360 ins_pipe( long_memory_op ); 7361 %} 7362 #endif 7363 7364 instruct xchgN( memory mem, iRegN newval) %{ 7365 match(Set newval (GetAndSetN mem newval)); 7366 format %{ "SWAP [$mem],$newval" %} 7367 size(4); 7368 ins_encode %{ 7369 __ swap($mem$$Address, $newval$$Register); 7370 %} 7371 ins_pipe( long_memory_op ); 7372 %} 7373 7374 //--------------------- 
7375 // Subtraction Instructions 7376 // Register Subtraction 7377 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7378 match(Set dst (SubI src1 src2)); 7379 7380 size(4); 7381 format %{ "SUB $src1,$src2,$dst" %} 7382 opcode(Assembler::sub_op3, Assembler::arith_op); 7383 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7384 ins_pipe(ialu_reg_reg); 7385 %} 7386 7387 // Immediate Subtraction 7388 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7389 match(Set dst (SubI src1 src2)); 7390 7391 size(4); 7392 format %{ "SUB $src1,$src2,$dst" %} 7393 opcode(Assembler::sub_op3, Assembler::arith_op); 7394 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7395 ins_pipe(ialu_reg_imm); 7396 %} 7397 7398 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{ 7399 match(Set dst (SubI zero src2)); 7400 7401 size(4); 7402 format %{ "NEG $src2,$dst" %} 7403 opcode(Assembler::sub_op3, Assembler::arith_op); 7404 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7405 ins_pipe(ialu_zero_reg); 7406 %} 7407 7408 // Long subtraction 7409 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7410 match(Set dst (SubL src1 src2)); 7411 7412 size(4); 7413 format %{ "SUB $src1,$src2,$dst\t! long" %} 7414 opcode(Assembler::sub_op3, Assembler::arith_op); 7415 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7416 ins_pipe(ialu_reg_reg); 7417 %} 7418 7419 // Immediate Subtraction 7420 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7421 match(Set dst (SubL src1 con)); 7422 7423 size(4); 7424 format %{ "SUB $src1,$con,$dst\t! long" %} 7425 opcode(Assembler::sub_op3, Assembler::arith_op); 7426 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7427 ins_pipe(ialu_reg_imm); 7428 %} 7429 7430 // Long negation 7431 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{ 7432 match(Set dst (SubL zero src2)); 7433 7434 size(4); 7435 format %{ "NEG $src2,$dst\t! 
long" %} 7436 opcode(Assembler::sub_op3, Assembler::arith_op); 7437 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7438 ins_pipe(ialu_zero_reg); 7439 %} 7440 7441 // Multiplication Instructions 7442 // Integer Multiplication 7443 // Register Multiplication 7444 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7445 match(Set dst (MulI src1 src2)); 7446 7447 size(4); 7448 format %{ "MULX $src1,$src2,$dst" %} 7449 opcode(Assembler::mulx_op3, Assembler::arith_op); 7450 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7451 ins_pipe(imul_reg_reg); 7452 %} 7453 7454 // Immediate Multiplication 7455 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7456 match(Set dst (MulI src1 src2)); 7457 7458 size(4); 7459 format %{ "MULX $src1,$src2,$dst" %} 7460 opcode(Assembler::mulx_op3, Assembler::arith_op); 7461 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7462 ins_pipe(imul_reg_imm); 7463 %} 7464 7465 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7466 match(Set dst (MulL src1 src2)); 7467 ins_cost(DEFAULT_COST * 5); 7468 size(4); 7469 format %{ "MULX $src1,$src2,$dst\t! 
long" %} 7470 opcode(Assembler::mulx_op3, Assembler::arith_op); 7471 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7472 ins_pipe(mulL_reg_reg); 7473 %} 7474 7475 // Immediate Multiplication 7476 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7477 match(Set dst (MulL src1 src2)); 7478 ins_cost(DEFAULT_COST * 5); 7479 size(4); 7480 format %{ "MULX $src1,$src2,$dst" %} 7481 opcode(Assembler::mulx_op3, Assembler::arith_op); 7482 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7483 ins_pipe(mulL_reg_imm); 7484 %} 7485 7486 // Integer Division 7487 // Register Division 7488 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{ 7489 match(Set dst (DivI src1 src2)); 7490 ins_cost((2+71)*DEFAULT_COST); 7491 7492 format %{ "SRA $src2,0,$src2\n\t" 7493 "SRA $src1,0,$src1\n\t" 7494 "SDIVX $src1,$src2,$dst" %} 7495 ins_encode( idiv_reg( src1, src2, dst ) ); 7496 ins_pipe(sdiv_reg_reg); 7497 %} 7498 7499 // Immediate Division 7500 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{ 7501 match(Set dst (DivI src1 src2)); 7502 ins_cost((2+71)*DEFAULT_COST); 7503 7504 format %{ "SRA $src1,0,$src1\n\t" 7505 "SDIVX $src1,$src2,$dst" %} 7506 ins_encode( idiv_imm( src1, src2, dst ) ); 7507 ins_pipe(sdiv_reg_imm); 7508 %} 7509 7510 //----------Div-By-10-Expansion------------------------------------------------ 7511 // Extract hi bits of a 32x32->64 bit multiply. 7512 // Expand rule only, not matched 7513 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{ 7514 effect( DEF dst, USE src1, USE src2 ); 7515 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t" 7516 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %} 7517 ins_encode( enc_mul_hi(dst,src1,src2)); 7518 ins_pipe(sdiv_reg_reg); 7519 %} 7520 7521 // Magic constant, reciprocal of 10 7522 instruct loadConI_x66666667(iRegIsafe dst) %{ 7523 effect( DEF dst ); 7524 7525 size(8); 7526 format %{ "SET 0x66666667,$dst\t! 
Used in div-by-10" %} 7527 ins_encode( Set32(0x66666667, dst) ); 7528 ins_pipe(ialu_hi_lo_reg); 7529 %} 7530 7531 // Register Shift Right Arithmetic Long by 32-63 7532 instruct sra_31( iRegI dst, iRegI src ) %{ 7533 effect( DEF dst, USE src ); 7534 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %} 7535 ins_encode( form3_rs1_rd_copysign_hi(src,dst) ); 7536 ins_pipe(ialu_reg_reg); 7537 %} 7538 7539 // Arithmetic Shift Right by 8-bit immediate 7540 instruct sra_reg_2( iRegI dst, iRegI src ) %{ 7541 effect( DEF dst, USE src ); 7542 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %} 7543 opcode(Assembler::sra_op3, Assembler::arith_op); 7544 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) ); 7545 ins_pipe(ialu_reg_imm); 7546 %} 7547 7548 // Integer DIV with 10 7549 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{ 7550 match(Set dst (DivI src div)); 7551 ins_cost((6+6)*DEFAULT_COST); 7552 expand %{ 7553 iRegIsafe tmp1; // Killed temps; 7554 iRegIsafe tmp2; // Killed temps; 7555 iRegI tmp3; // Killed temps; 7556 iRegI tmp4; // Killed temps; 7557 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1 7558 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2 7559 sra_31( tmp3, src ); // SRA src,31 -> tmp3 7560 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4 7561 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst 7562 %} 7563 %} 7564 7565 // Register Long Division 7566 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7567 match(Set dst (DivL src1 src2)); 7568 ins_cost(DEFAULT_COST*71); 7569 size(4); 7570 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7571 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7572 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7573 ins_pipe(divL_reg_reg); 7574 %} 7575 7576 // Register Long Division 7577 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7578 match(Set dst (DivL src1 src2)); 7579 ins_cost(DEFAULT_COST*71); 7580 size(4); 7581 format %{ "SDIVX $src1,$src2,$dst\t! 
long" %} 7582 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7583 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7584 ins_pipe(divL_reg_imm); 7585 %} 7586 7587 // Integer Remainder 7588 // Register Remainder 7589 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{ 7590 match(Set dst (ModI src1 src2)); 7591 effect( KILL ccr, KILL temp); 7592 7593 format %{ "SREM $src1,$src2,$dst" %} 7594 ins_encode( irem_reg(src1, src2, dst, temp) ); 7595 ins_pipe(sdiv_reg_reg); 7596 %} 7597 7598 // Immediate Remainder 7599 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{ 7600 match(Set dst (ModI src1 src2)); 7601 effect( KILL ccr, KILL temp); 7602 7603 format %{ "SREM $src1,$src2,$dst" %} 7604 ins_encode( irem_imm(src1, src2, dst, temp) ); 7605 ins_pipe(sdiv_reg_imm); 7606 %} 7607 7608 // Register Long Remainder 7609 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7610 effect(DEF dst, USE src1, USE src2); 7611 size(4); 7612 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7613 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7614 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7615 ins_pipe(divL_reg_reg); 7616 %} 7617 7618 // Register Long Division 7619 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7620 effect(DEF dst, USE src1, USE src2); 7621 size(4); 7622 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7623 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7624 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7625 ins_pipe(divL_reg_imm); 7626 %} 7627 7628 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7629 effect(DEF dst, USE src1, USE src2); 7630 size(4); 7631 format %{ "MULX $src1,$src2,$dst\t! 
long" %} 7632 opcode(Assembler::mulx_op3, Assembler::arith_op); 7633 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7634 ins_pipe(mulL_reg_reg); 7635 %} 7636 7637 // Immediate Multiplication 7638 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7639 effect(DEF dst, USE src1, USE src2); 7640 size(4); 7641 format %{ "MULX $src1,$src2,$dst" %} 7642 opcode(Assembler::mulx_op3, Assembler::arith_op); 7643 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7644 ins_pipe(mulL_reg_imm); 7645 %} 7646 7647 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7648 effect(DEF dst, USE src1, USE src2); 7649 size(4); 7650 format %{ "SUB $src1,$src2,$dst\t! long" %} 7651 opcode(Assembler::sub_op3, Assembler::arith_op); 7652 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7653 ins_pipe(ialu_reg_reg); 7654 %} 7655 7656 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{ 7657 effect(DEF dst, USE src1, USE src2); 7658 size(4); 7659 format %{ "SUB $src1,$src2,$dst\t! 
long" %} 7660 opcode(Assembler::sub_op3, Assembler::arith_op); 7661 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7662 ins_pipe(ialu_reg_reg); 7663 %} 7664 7665 // Register Long Remainder 7666 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7667 match(Set dst (ModL src1 src2)); 7668 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7669 expand %{ 7670 iRegL tmp1; 7671 iRegL tmp2; 7672 divL_reg_reg_1(tmp1, src1, src2); 7673 mulL_reg_reg_1(tmp2, tmp1, src2); 7674 subL_reg_reg_1(dst, src1, tmp2); 7675 %} 7676 %} 7677 7678 // Register Long Remainder 7679 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7680 match(Set dst (ModL src1 src2)); 7681 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7682 expand %{ 7683 iRegL tmp1; 7684 iRegL tmp2; 7685 divL_reg_imm13_1(tmp1, src1, src2); 7686 mulL_reg_imm13_1(tmp2, tmp1, src2); 7687 subL_reg_reg_2 (dst, src1, tmp2); 7688 %} 7689 %} 7690 7691 // Integer Shift Instructions 7692 // Register Shift Left 7693 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7694 match(Set dst (LShiftI src1 src2)); 7695 7696 size(4); 7697 format %{ "SLL $src1,$src2,$dst" %} 7698 opcode(Assembler::sll_op3, Assembler::arith_op); 7699 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7700 ins_pipe(ialu_reg_reg); 7701 %} 7702 7703 // Register Shift Left Immediate 7704 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7705 match(Set dst (LShiftI src1 src2)); 7706 7707 size(4); 7708 format %{ "SLL $src1,$src2,$dst" %} 7709 opcode(Assembler::sll_op3, Assembler::arith_op); 7710 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7711 ins_pipe(ialu_reg_imm); 7712 %} 7713 7714 // Register Shift Left 7715 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7716 match(Set dst (LShiftL src1 src2)); 7717 7718 size(4); 7719 format %{ "SLLX $src1,$src2,$dst" %} 7720 opcode(Assembler::sllx_op3, Assembler::arith_op); 7721 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7722 ins_pipe(ialu_reg_reg); 7723 %} 7724 7725 // 
Register Shift Left Immediate
instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
  match(Set dst (LShiftL src1 src2));

  size(4);
  format %{ "SLLX $src1,$src2,$dst" %}
  opcode(Assembler::sllx_op3, Assembler::arith_op);
  ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Arithmetic Shift Right
instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (RShiftI src1 src2));
  size(4);
  format %{ "SRA $src1,$src2,$dst" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Register Arithmetic Shift Right Immediate
instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
  match(Set dst (RShiftI src1 src2));

  size(4);
  format %{ "SRA $src1,$src2,$dst" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Shift Right Arithmetic Long
instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
  match(Set dst (RShiftL src1 src2));

  size(4);
  format %{ "SRAX $src1,$src2,$dst" %}
  opcode(Assembler::srax_op3, Assembler::arith_op);
  ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Register Arithmetic Shift Right Long Immediate
// NOTE(review): was mislabeled "Register Shift Left Immediate"; this is
// an arithmetic shift right (SRAX / RShiftL).
instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
  match(Set dst (RShiftL src1 src2));

  size(4);
  format %{ "SRAX $src1,$src2,$dst" %}
  opcode(Assembler::srax_op3, Assembler::arith_op);
  ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Shift Right
instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (URShiftI src1 src2));

  size(4);
  format %{ "SRL
$src1,$src2,$dst" %} 7785 opcode(Assembler::srl_op3, Assembler::arith_op); 7786 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7787 ins_pipe(ialu_reg_reg); 7788 %} 7789 7790 // Register Shift Right Immediate 7791 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7792 match(Set dst (URShiftI src1 src2)); 7793 7794 size(4); 7795 format %{ "SRL $src1,$src2,$dst" %} 7796 opcode(Assembler::srl_op3, Assembler::arith_op); 7797 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7798 ins_pipe(ialu_reg_imm); 7799 %} 7800 7801 // Register Shift Right 7802 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7803 match(Set dst (URShiftL src1 src2)); 7804 7805 size(4); 7806 format %{ "SRLX $src1,$src2,$dst" %} 7807 opcode(Assembler::srlx_op3, Assembler::arith_op); 7808 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7809 ins_pipe(ialu_reg_reg); 7810 %} 7811 7812 // Register Shift Right Immediate 7813 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7814 match(Set dst (URShiftL src1 src2)); 7815 7816 size(4); 7817 format %{ "SRLX $src1,$src2,$dst" %} 7818 opcode(Assembler::srlx_op3, Assembler::arith_op); 7819 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7820 ins_pipe(ialu_reg_imm); 7821 %} 7822 7823 // Register Shift Right Immediate with a CastP2X 7824 #ifdef _LP64 7825 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{ 7826 match(Set dst (URShiftL (CastP2X src1) src2)); 7827 size(4); 7828 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %} 7829 opcode(Assembler::srlx_op3, Assembler::arith_op); 7830 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7831 ins_pipe(ialu_reg_imm); 7832 %} 7833 #else 7834 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{ 7835 match(Set dst (URShiftI (CastP2X src1) src2)); 7836 size(4); 7837 format %{ "SRL $src1,$src2,$dst\t! 
Cast ptr $src1 to int and shift" %} 7838 opcode(Assembler::srl_op3, Assembler::arith_op); 7839 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7840 ins_pipe(ialu_reg_imm); 7841 %} 7842 #endif 7843 7844 7845 //----------Floating Point Arithmetic Instructions----------------------------- 7846 7847 // Add float single precision 7848 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ 7849 match(Set dst (AddF src1 src2)); 7850 7851 size(4); 7852 format %{ "FADDS $src1,$src2,$dst" %} 7853 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf); 7854 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7855 ins_pipe(faddF_reg_reg); 7856 %} 7857 7858 // Add float double precision 7859 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ 7860 match(Set dst (AddD src1 src2)); 7861 7862 size(4); 7863 format %{ "FADDD $src1,$src2,$dst" %} 7864 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 7865 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7866 ins_pipe(faddD_reg_reg); 7867 %} 7868 7869 // Sub float single precision 7870 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ 7871 match(Set dst (SubF src1 src2)); 7872 7873 size(4); 7874 format %{ "FSUBS $src1,$src2,$dst" %} 7875 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf); 7876 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7877 ins_pipe(faddF_reg_reg); 7878 %} 7879 7880 // Sub float double precision 7881 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ 7882 match(Set dst (SubD src1 src2)); 7883 7884 size(4); 7885 format %{ "FSUBD $src1,$src2,$dst" %} 7886 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 7887 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7888 ins_pipe(faddD_reg_reg); 7889 %} 7890 7891 // Mul float single precision 7892 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ 7893 match(Set dst (MulF src1 src2)); 7894 7895 size(4); 7896 format %{ "FMULS $src1,$src2,$dst" 
%} 7897 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf); 7898 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7899 ins_pipe(fmulF_reg_reg); 7900 %} 7901 7902 // Mul float double precision 7903 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ 7904 match(Set dst (MulD src1 src2)); 7905 7906 size(4); 7907 format %{ "FMULD $src1,$src2,$dst" %} 7908 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 7909 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7910 ins_pipe(fmulD_reg_reg); 7911 %} 7912 7913 // Div float single precision 7914 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ 7915 match(Set dst (DivF src1 src2)); 7916 7917 size(4); 7918 format %{ "FDIVS $src1,$src2,$dst" %} 7919 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf); 7920 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7921 ins_pipe(fdivF_reg_reg); 7922 %} 7923 7924 // Div float double precision 7925 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ 7926 match(Set dst (DivD src1 src2)); 7927 7928 size(4); 7929 format %{ "FDIVD $src1,$src2,$dst" %} 7930 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf); 7931 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7932 ins_pipe(fdivD_reg_reg); 7933 %} 7934 7935 // Absolute float double precision 7936 instruct absD_reg(regD dst, regD src) %{ 7937 match(Set dst (AbsD src)); 7938 7939 format %{ "FABSd $src,$dst" %} 7940 ins_encode(fabsd(dst, src)); 7941 ins_pipe(faddD_reg); 7942 %} 7943 7944 // Absolute float single precision 7945 instruct absF_reg(regF dst, regF src) %{ 7946 match(Set dst (AbsF src)); 7947 7948 format %{ "FABSs $src,$dst" %} 7949 ins_encode(fabss(dst, src)); 7950 ins_pipe(faddF_reg); 7951 %} 7952 7953 instruct negF_reg(regF dst, regF src) %{ 7954 match(Set dst (NegF src)); 7955 7956 size(4); 7957 format %{ "FNEGs $src,$dst" %} 7958 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf); 7959 
ins_encode(form3_opf_rs2F_rdF(src, dst)); 7960 ins_pipe(faddF_reg); 7961 %} 7962 7963 instruct negD_reg(regD dst, regD src) %{ 7964 match(Set dst (NegD src)); 7965 7966 format %{ "FNEGd $src,$dst" %} 7967 ins_encode(fnegd(dst, src)); 7968 ins_pipe(faddD_reg); 7969 %} 7970 7971 // Sqrt float double precision 7972 instruct sqrtF_reg_reg(regF dst, regF src) %{ 7973 match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); 7974 7975 size(4); 7976 format %{ "FSQRTS $src,$dst" %} 7977 ins_encode(fsqrts(dst, src)); 7978 ins_pipe(fdivF_reg_reg); 7979 %} 7980 7981 // Sqrt float double precision 7982 instruct sqrtD_reg_reg(regD dst, regD src) %{ 7983 match(Set dst (SqrtD src)); 7984 7985 size(4); 7986 format %{ "FSQRTD $src,$dst" %} 7987 ins_encode(fsqrtd(dst, src)); 7988 ins_pipe(fdivD_reg_reg); 7989 %} 7990 7991 //----------Logical Instructions----------------------------------------------- 7992 // And Instructions 7993 // Register And 7994 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7995 match(Set dst (AndI src1 src2)); 7996 7997 size(4); 7998 format %{ "AND $src1,$src2,$dst" %} 7999 opcode(Assembler::and_op3, Assembler::arith_op); 8000 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8001 ins_pipe(ialu_reg_reg); 8002 %} 8003 8004 // Immediate And 8005 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8006 match(Set dst (AndI src1 src2)); 8007 8008 size(4); 8009 format %{ "AND $src1,$src2,$dst" %} 8010 opcode(Assembler::and_op3, Assembler::arith_op); 8011 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8012 ins_pipe(ialu_reg_imm); 8013 %} 8014 8015 // Register And Long 8016 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8017 match(Set dst (AndL src1 src2)); 8018 8019 ins_cost(DEFAULT_COST); 8020 size(4); 8021 format %{ "AND $src1,$src2,$dst\t! 
long" %}
  opcode(Assembler::and_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (AndL src1 con));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "AND $src1,$con,$dst\t! long" %}
  opcode(Assembler::and_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Or Instructions
// Register Or
instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "OR $src1,$src2,$dst" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate Or
instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
  match(Set dst (OrI src1 src2));

  size(4);
  format %{ "OR $src1,$src2,$dst" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Or Long
instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "OR $src1,$src2,$dst\t! long" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// NOTE(review): a stale duplicate "ins_cost(DEFAULT_COST*2);" preceding
// the cost below (and silently overridden by it) has been removed; the
// effective cost is unchanged.
instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (OrL src1 con));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "OR $src1,$con,$dst\t! long" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
  ins_pipe(ialu_reg_imm);
%}

#ifndef _LP64

// Use sp_ptr_RegP to match G2 (TLS register) without spilling.
instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  size(4);
  format %{ "OR $src1,$src2,$dst" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

#else

instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
  match(Set dst (OrL src1 (CastP2X src2)));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "OR $src1,$src2,$dst\t! long" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

#endif

// Xor Instructions
// Register Xor
instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (XorI src1 src2));

  size(4);
  format %{ "XOR $src1,$src2,$dst" %}
  opcode(Assembler::xor_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Immediate Xor
instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
  match(Set dst (XorI src1 src2));

  size(4);
  format %{ "XOR $src1,$src2,$dst" %}
  opcode(Assembler::xor_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Register Xor Long
instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "XOR $src1,$src2,$dst\t!
long" %} 8143 opcode(Assembler::xor_op3, Assembler::arith_op); 8144 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8145 ins_pipe(ialu_reg_reg); 8146 %} 8147 8148 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8149 match(Set dst (XorL src1 con)); 8150 8151 ins_cost(DEFAULT_COST); 8152 size(4); 8153 format %{ "XOR $src1,$con,$dst\t! long" %} 8154 opcode(Assembler::xor_op3, Assembler::arith_op); 8155 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8156 ins_pipe(ialu_reg_imm); 8157 %} 8158 8159 //----------Convert to Boolean------------------------------------------------- 8160 // Nice hack for 32-bit tests but doesn't work for 8161 // 64-bit pointers. 8162 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{ 8163 match(Set dst (Conv2B src)); 8164 effect( KILL ccr ); 8165 ins_cost(DEFAULT_COST*2); 8166 format %{ "CMP R_G0,$src\n\t" 8167 "ADDX R_G0,0,$dst" %} 8168 ins_encode( enc_to_bool( src, dst ) ); 8169 ins_pipe(ialu_reg_ialu); 8170 %} 8171 8172 #ifndef _LP64 8173 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{ 8174 match(Set dst (Conv2B src)); 8175 effect( KILL ccr ); 8176 ins_cost(DEFAULT_COST*2); 8177 format %{ "CMP R_G0,$src\n\t" 8178 "ADDX R_G0,0,$dst" %} 8179 ins_encode( enc_to_bool( src, dst ) ); 8180 ins_pipe(ialu_reg_ialu); 8181 %} 8182 #else 8183 instruct convP2B( iRegI dst, iRegP src ) %{ 8184 match(Set dst (Conv2B src)); 8185 ins_cost(DEFAULT_COST*2); 8186 format %{ "MOV $src,$dst\n\t" 8187 "MOVRNZ $src,1,$dst" %} 8188 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) ); 8189 ins_pipe(ialu_clr_and_mover); 8190 %} 8191 #endif 8192 8193 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{ 8194 match(Set dst (CmpLTMask src zero)); 8195 effect(KILL ccr); 8196 size(4); 8197 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %} 8198 ins_encode %{ 8199 __ sra($src$$Register, 31, $dst$$Register); 8200 %} 8201 ins_pipe(ialu_reg_imm); 8202 %} 8203 8204 instruct cmpLTMask_reg_reg( iRegI dst, 
iRegI p, iRegI q, flagsReg ccr ) %{ 8205 match(Set dst (CmpLTMask p q)); 8206 effect( KILL ccr ); 8207 ins_cost(DEFAULT_COST*4); 8208 format %{ "CMP $p,$q\n\t" 8209 "MOV #0,$dst\n\t" 8210 "BLT,a .+8\n\t" 8211 "MOV #-1,$dst" %} 8212 ins_encode( enc_ltmask(p,q,dst) ); 8213 ins_pipe(ialu_reg_reg_ialu); 8214 %} 8215 8216 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{ 8217 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); 8218 effect(KILL ccr, TEMP tmp); 8219 ins_cost(DEFAULT_COST*3); 8220 8221 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" 8222 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" 8223 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %} 8224 ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp)); 8225 ins_pipe(cadd_cmpltmask); 8226 %} 8227 8228 instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{ 8229 match(Set p (AndI (CmpLTMask p q) y)); 8230 effect(KILL ccr); 8231 ins_cost(DEFAULT_COST*3); 8232 8233 format %{ "CMP $p,$q\n\t" 8234 "MOV $y,$p\n\t" 8235 "MOVge G0,$p" %} 8236 ins_encode %{ 8237 __ cmp($p$$Register, $q$$Register); 8238 __ mov($y$$Register, $p$$Register); 8239 __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register); 8240 %} 8241 ins_pipe(ialu_reg_reg_ialu); 8242 %} 8243 8244 //----------------------------------------------------------------- 8245 // Direct raw moves between float and general registers using VIS3. 8246 8247 // ins_pipe(faddF_reg); 8248 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{ 8249 predicate(UseVIS >= 3); 8250 match(Set dst (MoveF2I src)); 8251 8252 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %} 8253 ins_encode %{ 8254 __ movstouw($src$$FloatRegister, $dst$$Register); 8255 %} 8256 ins_pipe(ialu_reg_reg); 8257 %} 8258 8259 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{ 8260 predicate(UseVIS >= 3); 8261 match(Set dst (MoveI2F src)); 8262 8263 format %{ "MOVWTOS $src,$dst\t! 
MoveI2F" %} 8264 ins_encode %{ 8265 __ movwtos($src$$Register, $dst$$FloatRegister); 8266 %} 8267 ins_pipe(ialu_reg_reg); 8268 %} 8269 8270 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{ 8271 predicate(UseVIS >= 3); 8272 match(Set dst (MoveD2L src)); 8273 8274 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %} 8275 ins_encode %{ 8276 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register); 8277 %} 8278 ins_pipe(ialu_reg_reg); 8279 %} 8280 8281 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{ 8282 predicate(UseVIS >= 3); 8283 match(Set dst (MoveL2D src)); 8284 8285 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %} 8286 ins_encode %{ 8287 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg)); 8288 %} 8289 ins_pipe(ialu_reg_reg); 8290 %} 8291 8292 8293 // Raw moves between float and general registers using stack. 8294 8295 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{ 8296 match(Set dst (MoveF2I src)); 8297 effect(DEF dst, USE src); 8298 ins_cost(MEMORY_REF_COST); 8299 8300 size(4); 8301 format %{ "LDUW $src,$dst\t! MoveF2I" %} 8302 opcode(Assembler::lduw_op3); 8303 ins_encode(simple_form3_mem_reg( src, dst ) ); 8304 ins_pipe(iload_mem); 8305 %} 8306 8307 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{ 8308 match(Set dst (MoveI2F src)); 8309 effect(DEF dst, USE src); 8310 ins_cost(MEMORY_REF_COST); 8311 8312 size(4); 8313 format %{ "LDF $src,$dst\t! MoveI2F" %} 8314 opcode(Assembler::ldf_op3); 8315 ins_encode(simple_form3_mem_reg(src, dst)); 8316 ins_pipe(floadF_stk); 8317 %} 8318 8319 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{ 8320 match(Set dst (MoveD2L src)); 8321 effect(DEF dst, USE src); 8322 ins_cost(MEMORY_REF_COST); 8323 8324 size(4); 8325 format %{ "LDX $src,$dst\t! 
MoveD2L" %} 8326 opcode(Assembler::ldx_op3); 8327 ins_encode(simple_form3_mem_reg( src, dst ) ); 8328 ins_pipe(iload_mem); 8329 %} 8330 8331 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{ 8332 match(Set dst (MoveL2D src)); 8333 effect(DEF dst, USE src); 8334 ins_cost(MEMORY_REF_COST); 8335 8336 size(4); 8337 format %{ "LDDF $src,$dst\t! MoveL2D" %} 8338 opcode(Assembler::lddf_op3); 8339 ins_encode(simple_form3_mem_reg(src, dst)); 8340 ins_pipe(floadD_stk); 8341 %} 8342 8343 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{ 8344 match(Set dst (MoveF2I src)); 8345 effect(DEF dst, USE src); 8346 ins_cost(MEMORY_REF_COST); 8347 8348 size(4); 8349 format %{ "STF $src,$dst\t! MoveF2I" %} 8350 opcode(Assembler::stf_op3); 8351 ins_encode(simple_form3_mem_reg(dst, src)); 8352 ins_pipe(fstoreF_stk_reg); 8353 %} 8354 8355 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{ 8356 match(Set dst (MoveI2F src)); 8357 effect(DEF dst, USE src); 8358 ins_cost(MEMORY_REF_COST); 8359 8360 size(4); 8361 format %{ "STW $src,$dst\t! MoveI2F" %} 8362 opcode(Assembler::stw_op3); 8363 ins_encode(simple_form3_mem_reg( dst, src ) ); 8364 ins_pipe(istore_mem_reg); 8365 %} 8366 8367 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{ 8368 match(Set dst (MoveD2L src)); 8369 effect(DEF dst, USE src); 8370 ins_cost(MEMORY_REF_COST); 8371 8372 size(4); 8373 format %{ "STDF $src,$dst\t! MoveD2L" %} 8374 opcode(Assembler::stdf_op3); 8375 ins_encode(simple_form3_mem_reg(dst, src)); 8376 ins_pipe(fstoreD_stk_reg); 8377 %} 8378 8379 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{ 8380 match(Set dst (MoveL2D src)); 8381 effect(DEF dst, USE src); 8382 ins_cost(MEMORY_REF_COST); 8383 8384 size(4); 8385 format %{ "STX $src,$dst\t! 
MoveL2D" %} 8386 opcode(Assembler::stx_op3); 8387 ins_encode(simple_form3_mem_reg( dst, src ) ); 8388 ins_pipe(istore_mem_reg); 8389 %} 8390 8391 8392 //----------Arithmetic Conversion Instructions--------------------------------- 8393 // The conversions operations are all Alpha sorted. Please keep it that way! 8394 8395 instruct convD2F_reg(regF dst, regD src) %{ 8396 match(Set dst (ConvD2F src)); 8397 size(4); 8398 format %{ "FDTOS $src,$dst" %} 8399 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); 8400 ins_encode(form3_opf_rs2D_rdF(src, dst)); 8401 ins_pipe(fcvtD2F); 8402 %} 8403 8404 8405 // Convert a double to an int in a float register. 8406 // If the double is a NAN, stuff a zero in instead. 8407 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{ 8408 effect(DEF dst, USE src, KILL fcc0); 8409 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8410 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8411 "FDTOI $src,$dst\t! convert in delay slot\n\t" 8412 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8413 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" 8414 "skip:" %} 8415 ins_encode(form_d2i_helper(src,dst)); 8416 ins_pipe(fcvtD2I); 8417 %} 8418 8419 instruct convD2I_stk(stackSlotI dst, regD src) %{ 8420 match(Set dst (ConvD2I src)); 8421 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8422 expand %{ 8423 regF tmp; 8424 convD2I_helper(tmp, src); 8425 regF_to_stkI(dst, tmp); 8426 %} 8427 %} 8428 8429 instruct convD2I_reg(iRegI dst, regD src) %{ 8430 predicate(UseVIS >= 3); 8431 match(Set dst (ConvD2I src)); 8432 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8433 expand %{ 8434 regF tmp; 8435 convD2I_helper(tmp, src); 8436 MoveF2I_reg_reg(dst, tmp); 8437 %} 8438 %} 8439 8440 8441 // Convert a double to a long in a double register. 8442 // If the double is a NAN, stuff a zero in instead. 
8443 instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{ 8444 effect(DEF dst, USE src, KILL fcc0); 8445 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8446 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8447 "FDTOX $src,$dst\t! convert in delay slot\n\t" 8448 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8449 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n" 8450 "skip:" %} 8451 ins_encode(form_d2l_helper(src,dst)); 8452 ins_pipe(fcvtD2L); 8453 %} 8454 8455 instruct convD2L_stk(stackSlotL dst, regD src) %{ 8456 match(Set dst (ConvD2L src)); 8457 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8458 expand %{ 8459 regD tmp; 8460 convD2L_helper(tmp, src); 8461 regD_to_stkL(dst, tmp); 8462 %} 8463 %} 8464 8465 instruct convD2L_reg(iRegL dst, regD src) %{ 8466 predicate(UseVIS >= 3); 8467 match(Set dst (ConvD2L src)); 8468 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8469 expand %{ 8470 regD tmp; 8471 convD2L_helper(tmp, src); 8472 MoveD2L_reg_reg(dst, tmp); 8473 %} 8474 %} 8475 8476 8477 instruct convF2D_reg(regD dst, regF src) %{ 8478 match(Set dst (ConvF2D src)); 8479 format %{ "FSTOD $src,$dst" %} 8480 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf); 8481 ins_encode(form3_opf_rs2F_rdD(src, dst)); 8482 ins_pipe(fcvtF2D); 8483 %} 8484 8485 8486 // Convert a float to an int in a float register. 8487 // If the float is a NAN, stuff a zero in instead. 8488 instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{ 8489 effect(DEF dst, USE src, KILL fcc0); 8490 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8491 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8492 "FSTOI $src,$dst\t! convert in delay slot\n\t" 8493 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8494 "FSUBs $dst,$dst,$dst\t! 
cleared only if nan\n" 8495 "skip:" %} 8496 ins_encode(form_f2i_helper(src,dst)); 8497 ins_pipe(fcvtF2I); 8498 %} 8499 8500 instruct convF2I_stk(stackSlotI dst, regF src) %{ 8501 match(Set dst (ConvF2I src)); 8502 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8503 expand %{ 8504 regF tmp; 8505 convF2I_helper(tmp, src); 8506 regF_to_stkI(dst, tmp); 8507 %} 8508 %} 8509 8510 instruct convF2I_reg(iRegI dst, regF src) %{ 8511 predicate(UseVIS >= 3); 8512 match(Set dst (ConvF2I src)); 8513 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8514 expand %{ 8515 regF tmp; 8516 convF2I_helper(tmp, src); 8517 MoveF2I_reg_reg(dst, tmp); 8518 %} 8519 %} 8520 8521 8522 // Convert a float to a long in a float register. 8523 // If the float is a NAN, stuff a zero in instead. 8524 instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{ 8525 effect(DEF dst, USE src, KILL fcc0); 8526 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8527 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8528 "FSTOX $src,$dst\t! convert in delay slot\n\t" 8529 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8530 "FSUBd $dst,$dst,$dst\t! 
cleared only if nan\n" 8531 "skip:" %} 8532 ins_encode(form_f2l_helper(src,dst)); 8533 ins_pipe(fcvtF2L); 8534 %} 8535 8536 instruct convF2L_stk(stackSlotL dst, regF src) %{ 8537 match(Set dst (ConvF2L src)); 8538 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8539 expand %{ 8540 regD tmp; 8541 convF2L_helper(tmp, src); 8542 regD_to_stkL(dst, tmp); 8543 %} 8544 %} 8545 8546 instruct convF2L_reg(iRegL dst, regF src) %{ 8547 predicate(UseVIS >= 3); 8548 match(Set dst (ConvF2L src)); 8549 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8550 expand %{ 8551 regD tmp; 8552 convF2L_helper(tmp, src); 8553 MoveD2L_reg_reg(dst, tmp); 8554 %} 8555 %} 8556 8557 8558 instruct convI2D_helper(regD dst, regF tmp) %{ 8559 effect(USE tmp, DEF dst); 8560 format %{ "FITOD $tmp,$dst" %} 8561 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8562 ins_encode(form3_opf_rs2F_rdD(tmp, dst)); 8563 ins_pipe(fcvtI2D); 8564 %} 8565 8566 instruct convI2D_stk(stackSlotI src, regD dst) %{ 8567 match(Set dst (ConvI2D src)); 8568 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8569 expand %{ 8570 regF tmp; 8571 stkI_to_regF(tmp, src); 8572 convI2D_helper(dst, tmp); 8573 %} 8574 %} 8575 8576 instruct convI2D_reg(regD_low dst, iRegI src) %{ 8577 predicate(UseVIS >= 3); 8578 match(Set dst (ConvI2D src)); 8579 expand %{ 8580 regF tmp; 8581 MoveI2F_reg_reg(tmp, src); 8582 convI2D_helper(dst, tmp); 8583 %} 8584 %} 8585 8586 instruct convI2D_mem(regD_low dst, memory mem) %{ 8587 match(Set dst (ConvI2D (LoadI mem))); 8588 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8589 size(8); 8590 format %{ "LDF $mem,$dst\n\t" 8591 "FITOD $dst,$dst" %} 8592 opcode(Assembler::ldf_op3, Assembler::fitod_opf); 8593 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8594 ins_pipe(floadF_mem); 8595 %} 8596 8597 8598 instruct convI2F_helper(regF dst, regF tmp) %{ 8599 effect(DEF dst, USE tmp); 8600 format %{ "FITOS $tmp,$dst" %} 8601 opcode(Assembler::fpop1_op3, Assembler::arith_op, 
Assembler::fitos_opf); 8602 ins_encode(form3_opf_rs2F_rdF(tmp, dst)); 8603 ins_pipe(fcvtI2F); 8604 %} 8605 8606 instruct convI2F_stk(regF dst, stackSlotI src) %{ 8607 match(Set dst (ConvI2F src)); 8608 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8609 expand %{ 8610 regF tmp; 8611 stkI_to_regF(tmp,src); 8612 convI2F_helper(dst, tmp); 8613 %} 8614 %} 8615 8616 instruct convI2F_reg(regF dst, iRegI src) %{ 8617 predicate(UseVIS >= 3); 8618 match(Set dst (ConvI2F src)); 8619 ins_cost(DEFAULT_COST); 8620 expand %{ 8621 regF tmp; 8622 MoveI2F_reg_reg(tmp, src); 8623 convI2F_helper(dst, tmp); 8624 %} 8625 %} 8626 8627 instruct convI2F_mem( regF dst, memory mem ) %{ 8628 match(Set dst (ConvI2F (LoadI mem))); 8629 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8630 size(8); 8631 format %{ "LDF $mem,$dst\n\t" 8632 "FITOS $dst,$dst" %} 8633 opcode(Assembler::ldf_op3, Assembler::fitos_opf); 8634 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8635 ins_pipe(floadF_mem); 8636 %} 8637 8638 8639 instruct convI2L_reg(iRegL dst, iRegI src) %{ 8640 match(Set dst (ConvI2L src)); 8641 size(4); 8642 format %{ "SRA $src,0,$dst\t! int->long" %} 8643 opcode(Assembler::sra_op3, Assembler::arith_op); 8644 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8645 ins_pipe(ialu_reg_reg); 8646 %} 8647 8648 // Zero-extend convert int to long 8649 instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{ 8650 match(Set dst (AndL (ConvI2L src) mask) ); 8651 size(4); 8652 format %{ "SRL $src,0,$dst\t! zero-extend int to long" %} 8653 opcode(Assembler::srl_op3, Assembler::arith_op); 8654 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8655 ins_pipe(ialu_reg_reg); 8656 %} 8657 8658 // Zero-extend long 8659 instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{ 8660 match(Set dst (AndL src mask) ); 8661 size(4); 8662 format %{ "SRL $src,0,$dst\t! 
zero-extend long" %} 8663 opcode(Assembler::srl_op3, Assembler::arith_op); 8664 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8665 ins_pipe(ialu_reg_reg); 8666 %} 8667 8668 8669 //----------- 8670 // Long to Double conversion using V8 opcodes. 8671 // Still useful because cheetah traps and becomes 8672 // amazingly slow for some common numbers. 8673 8674 // Magic constant, 0x43300000 8675 instruct loadConI_x43300000(iRegI dst) %{ 8676 effect(DEF dst); 8677 size(4); 8678 format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %} 8679 ins_encode(SetHi22(0x43300000, dst)); 8680 ins_pipe(ialu_none); 8681 %} 8682 8683 // Magic constant, 0x41f00000 8684 instruct loadConI_x41f00000(iRegI dst) %{ 8685 effect(DEF dst); 8686 size(4); 8687 format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %} 8688 ins_encode(SetHi22(0x41f00000, dst)); 8689 ins_pipe(ialu_none); 8690 %} 8691 8692 // Construct a double from two float halves 8693 instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{ 8694 effect(DEF dst, USE src1, USE src2); 8695 size(8); 8696 format %{ "FMOVS $src1.hi,$dst.hi\n\t" 8697 "FMOVS $src2.lo,$dst.lo" %} 8698 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf); 8699 ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst)); 8700 ins_pipe(faddD_reg_reg); 8701 %} 8702 8703 // Convert integer in high half of a double register (in the lower half of 8704 // the double register file) to double 8705 instruct convI2D_regDHi_regD(regD dst, regD_low src) %{ 8706 effect(DEF dst, USE src); 8707 size(4); 8708 format %{ "FITOD $src,$dst" %} 8709 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8710 ins_encode(form3_opf_rs2D_rdD(src, dst)); 8711 ins_pipe(fcvtLHi2D); 8712 %} 8713 8714 // Add float double precision 8715 instruct addD_regD_regD(regD dst, regD src1, regD src2) %{ 8716 effect(DEF dst, USE src1, USE src2); 8717 size(4); 8718 format %{ "FADDD $src1,$src2,$dst" %} 8719 
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 8720 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8721 ins_pipe(faddD_reg_reg); 8722 %} 8723 8724 // Sub float double precision 8725 instruct subD_regD_regD(regD dst, regD src1, regD src2) %{ 8726 effect(DEF dst, USE src1, USE src2); 8727 size(4); 8728 format %{ "FSUBD $src1,$src2,$dst" %} 8729 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 8730 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8731 ins_pipe(faddD_reg_reg); 8732 %} 8733 8734 // Mul float double precision 8735 instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{ 8736 effect(DEF dst, USE src1, USE src2); 8737 size(4); 8738 format %{ "FMULD $src1,$src2,$dst" %} 8739 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 8740 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8741 ins_pipe(fmulD_reg_reg); 8742 %} 8743 8744 instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{ 8745 match(Set dst (ConvL2D src)); 8746 ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6); 8747 8748 expand %{ 8749 regD_low tmpsrc; 8750 iRegI ix43300000; 8751 iRegI ix41f00000; 8752 stackSlotL lx43300000; 8753 stackSlotL lx41f00000; 8754 regD_low dx43300000; 8755 regD dx41f00000; 8756 regD tmp1; 8757 regD_low tmp2; 8758 regD tmp3; 8759 regD tmp4; 8760 8761 stkL_to_regD(tmpsrc, src); 8762 8763 loadConI_x43300000(ix43300000); 8764 loadConI_x41f00000(ix41f00000); 8765 regI_to_stkLHi(lx43300000, ix43300000); 8766 regI_to_stkLHi(lx41f00000, ix41f00000); 8767 stkL_to_regD(dx43300000, lx43300000); 8768 stkL_to_regD(dx41f00000, lx41f00000); 8769 8770 convI2D_regDHi_regD(tmp1, tmpsrc); 8771 regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc); 8772 subD_regD_regD(tmp3, tmp2, dx43300000); 8773 mulD_regD_regD(tmp4, tmp1, dx41f00000); 8774 addD_regD_regD(dst, tmp3, tmp4); 8775 %} 8776 %} 8777 8778 // Long to Double conversion using fast fxtof 8779 instruct convL2D_helper(regD dst, regD tmp) %{ 8780 
effect(DEF dst, USE tmp); 8781 size(4); 8782 format %{ "FXTOD $tmp,$dst" %} 8783 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf); 8784 ins_encode(form3_opf_rs2D_rdD(tmp, dst)); 8785 ins_pipe(fcvtL2D); 8786 %} 8787 8788 instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{ 8789 predicate(VM_Version::has_fast_fxtof()); 8790 match(Set dst (ConvL2D src)); 8791 ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST); 8792 expand %{ 8793 regD tmp; 8794 stkL_to_regD(tmp, src); 8795 convL2D_helper(dst, tmp); 8796 %} 8797 %} 8798 8799 instruct convL2D_reg(regD dst, iRegL src) %{ 8800 predicate(UseVIS >= 3); 8801 match(Set dst (ConvL2D src)); 8802 expand %{ 8803 regD tmp; 8804 MoveL2D_reg_reg(tmp, src); 8805 convL2D_helper(dst, tmp); 8806 %} 8807 %} 8808 8809 // Long to Float conversion using fast fxtof 8810 instruct convL2F_helper(regF dst, regD tmp) %{ 8811 effect(DEF dst, USE tmp); 8812 size(4); 8813 format %{ "FXTOS $tmp,$dst" %} 8814 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf); 8815 ins_encode(form3_opf_rs2D_rdF(tmp, dst)); 8816 ins_pipe(fcvtL2F); 8817 %} 8818 8819 instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{ 8820 match(Set dst (ConvL2F src)); 8821 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8822 expand %{ 8823 regD tmp; 8824 stkL_to_regD(tmp, src); 8825 convL2F_helper(dst, tmp); 8826 %} 8827 %} 8828 8829 instruct convL2F_reg(regF dst, iRegL src) %{ 8830 predicate(UseVIS >= 3); 8831 match(Set dst (ConvL2F src)); 8832 ins_cost(DEFAULT_COST); 8833 expand %{ 8834 regD tmp; 8835 MoveL2D_reg_reg(tmp, src); 8836 convL2F_helper(dst, tmp); 8837 %} 8838 %} 8839 8840 //----------- 8841 8842 instruct convL2I_reg(iRegI dst, iRegL src) %{ 8843 match(Set dst (ConvL2I src)); 8844 #ifndef _LP64 8845 format %{ "MOV $src.lo,$dst\t! long->int" %} 8846 ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) ); 8847 ins_pipe(ialu_move_reg_I_to_L); 8848 #else 8849 size(4); 8850 format %{ "SRA $src,R_G0,$dst\t! 
long->int" %} 8851 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) ); 8852 ins_pipe(ialu_reg); 8853 #endif 8854 %} 8855 8856 // Register Shift Right Immediate 8857 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{ 8858 match(Set dst (ConvL2I (RShiftL src cnt))); 8859 8860 size(4); 8861 format %{ "SRAX $src,$cnt,$dst" %} 8862 opcode(Assembler::srax_op3, Assembler::arith_op); 8863 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) ); 8864 ins_pipe(ialu_reg_imm); 8865 %} 8866 8867 //----------Control Flow Instructions------------------------------------------ 8868 // Compare Instructions 8869 // Compare Integers 8870 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{ 8871 match(Set icc (CmpI op1 op2)); 8872 effect( DEF icc, USE op1, USE op2 ); 8873 8874 size(4); 8875 format %{ "CMP $op1,$op2" %} 8876 opcode(Assembler::subcc_op3, Assembler::arith_op); 8877 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8878 ins_pipe(ialu_cconly_reg_reg); 8879 %} 8880 8881 instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{ 8882 match(Set icc (CmpU op1 op2)); 8883 8884 size(4); 8885 format %{ "CMP $op1,$op2\t! 
unsigned" %} 8886 opcode(Assembler::subcc_op3, Assembler::arith_op); 8887 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8888 ins_pipe(ialu_cconly_reg_reg); 8889 %} 8890 8891 instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{ 8892 match(Set icc (CmpI op1 op2)); 8893 effect( DEF icc, USE op1 ); 8894 8895 size(4); 8896 format %{ "CMP $op1,$op2" %} 8897 opcode(Assembler::subcc_op3, Assembler::arith_op); 8898 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8899 ins_pipe(ialu_cconly_reg_imm); 8900 %} 8901 8902 instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{ 8903 match(Set icc (CmpI (AndI op1 op2) zero)); 8904 8905 size(4); 8906 format %{ "BTST $op2,$op1" %} 8907 opcode(Assembler::andcc_op3, Assembler::arith_op); 8908 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8909 ins_pipe(ialu_cconly_reg_reg_zero); 8910 %} 8911 8912 instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{ 8913 match(Set icc (CmpI (AndI op1 op2) zero)); 8914 8915 size(4); 8916 format %{ "BTST $op2,$op1" %} 8917 opcode(Assembler::andcc_op3, Assembler::arith_op); 8918 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8919 ins_pipe(ialu_cconly_reg_imm_zero); 8920 %} 8921 8922 instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{ 8923 match(Set xcc (CmpL op1 op2)); 8924 effect( DEF xcc, USE op1, USE op2 ); 8925 8926 size(4); 8927 format %{ "CMP $op1,$op2\t\t! long" %} 8928 opcode(Assembler::subcc_op3, Assembler::arith_op); 8929 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8930 ins_pipe(ialu_cconly_reg_reg); 8931 %} 8932 8933 instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{ 8934 match(Set xcc (CmpL op1 con)); 8935 effect( DEF xcc, USE op1, USE con ); 8936 8937 size(4); 8938 format %{ "CMP $op1,$con\t\t! 
long" %} 8939 opcode(Assembler::subcc_op3, Assembler::arith_op); 8940 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 8941 ins_pipe(ialu_cconly_reg_reg); 8942 %} 8943 8944 instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{ 8945 match(Set xcc (CmpL (AndL op1 op2) zero)); 8946 effect( DEF xcc, USE op1, USE op2 ); 8947 8948 size(4); 8949 format %{ "BTST $op1,$op2\t\t! long" %} 8950 opcode(Assembler::andcc_op3, Assembler::arith_op); 8951 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8952 ins_pipe(ialu_cconly_reg_reg); 8953 %} 8954 8955 // useful for checking the alignment of a pointer: 8956 instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{ 8957 match(Set xcc (CmpL (AndL op1 con) zero)); 8958 effect( DEF xcc, USE op1, USE con ); 8959 8960 size(4); 8961 format %{ "BTST $op1,$con\t\t! long" %} 8962 opcode(Assembler::andcc_op3, Assembler::arith_op); 8963 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 8964 ins_pipe(ialu_cconly_reg_reg); 8965 %} 8966 8967 instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU13 op2 ) %{ 8968 match(Set icc (CmpU op1 op2)); 8969 8970 size(4); 8971 format %{ "CMP $op1,$op2\t! unsigned" %} 8972 opcode(Assembler::subcc_op3, Assembler::arith_op); 8973 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8974 ins_pipe(ialu_cconly_reg_imm); 8975 %} 8976 8977 // Compare Pointers 8978 instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{ 8979 match(Set pcc (CmpP op1 op2)); 8980 8981 size(4); 8982 format %{ "CMP $op1,$op2\t! ptr" %} 8983 opcode(Assembler::subcc_op3, Assembler::arith_op); 8984 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8985 ins_pipe(ialu_cconly_reg_reg); 8986 %} 8987 8988 instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{ 8989 match(Set pcc (CmpP op1 op2)); 8990 8991 size(4); 8992 format %{ "CMP $op1,$op2\t! 
ptr" %} 8993 opcode(Assembler::subcc_op3, Assembler::arith_op); 8994 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8995 ins_pipe(ialu_cconly_reg_imm); 8996 %} 8997 8998 // Compare Narrow oops 8999 instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{ 9000 match(Set icc (CmpN op1 op2)); 9001 9002 size(4); 9003 format %{ "CMP $op1,$op2\t! compressed ptr" %} 9004 opcode(Assembler::subcc_op3, Assembler::arith_op); 9005 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9006 ins_pipe(ialu_cconly_reg_reg); 9007 %} 9008 9009 instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{ 9010 match(Set icc (CmpN op1 op2)); 9011 9012 size(4); 9013 format %{ "CMP $op1,$op2\t! compressed ptr" %} 9014 opcode(Assembler::subcc_op3, Assembler::arith_op); 9015 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9016 ins_pipe(ialu_cconly_reg_imm); 9017 %} 9018 9019 //----------Max and Min-------------------------------------------------------- 9020 // Min Instructions 9021 // Conditional move for min 9022 instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{ 9023 effect( USE_DEF op2, USE op1, USE icc ); 9024 9025 size(4); 9026 format %{ "MOVlt icc,$op1,$op2\t! min" %} 9027 opcode(Assembler::less); 9028 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 9029 ins_pipe(ialu_reg_flags); 9030 %} 9031 9032 // Min Register with Register. 9033 instruct minI_eReg(iRegI op1, iRegI op2) %{ 9034 match(Set op2 (MinI op1 op2)); 9035 ins_cost(DEFAULT_COST*2); 9036 expand %{ 9037 flagsReg icc; 9038 compI_iReg(icc,op1,op2); 9039 cmovI_reg_lt(op2,op1,icc); 9040 %} 9041 %} 9042 9043 // Max Instructions 9044 // Conditional move for max 9045 instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{ 9046 effect( USE_DEF op2, USE op1, USE icc ); 9047 format %{ "MOVgt icc,$op1,$op2\t! 
max" %} 9048 opcode(Assembler::greater); 9049 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 9050 ins_pipe(ialu_reg_flags); 9051 %} 9052 9053 // Max Register with Register 9054 instruct maxI_eReg(iRegI op1, iRegI op2) %{ 9055 match(Set op2 (MaxI op1 op2)); 9056 ins_cost(DEFAULT_COST*2); 9057 expand %{ 9058 flagsReg icc; 9059 compI_iReg(icc,op1,op2); 9060 cmovI_reg_gt(op2,op1,icc); 9061 %} 9062 %} 9063 9064 9065 //----------Float Compares---------------------------------------------------- 9066 // Compare floating, generate condition code 9067 instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{ 9068 match(Set fcc (CmpF src1 src2)); 9069 9070 size(4); 9071 format %{ "FCMPs $fcc,$src1,$src2" %} 9072 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf); 9073 ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) ); 9074 ins_pipe(faddF_fcc_reg_reg_zero); 9075 %} 9076 9077 instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{ 9078 match(Set fcc (CmpD src1 src2)); 9079 9080 size(4); 9081 format %{ "FCMPd $fcc,$src1,$src2" %} 9082 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf); 9083 ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) ); 9084 ins_pipe(faddD_fcc_reg_reg_zero); 9085 %} 9086 9087 9088 // Compare floating, generate -1,0,1 9089 instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{ 9090 match(Set dst (CmpF3 src1 src2)); 9091 effect(KILL fcc0); 9092 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9093 format %{ "fcmpl $dst,$src1,$src2" %} 9094 // Primary = float 9095 opcode( true ); 9096 ins_encode( floating_cmp( dst, src1, src2 ) ); 9097 ins_pipe( floating_cmp ); 9098 %} 9099 9100 instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{ 9101 match(Set dst (CmpD3 src1 src2)); 9102 effect(KILL fcc0); 9103 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9104 format %{ "dcmpl $dst,$src1,$src2" %} 9105 // Primary = double (not float) 9106 opcode( false ); 9107 ins_encode( floating_cmp( dst, src1, src2 
) ); 9108 ins_pipe( floating_cmp ); 9109 %} 9110 9111 //----------Branches--------------------------------------------------------- 9112 // Jump 9113 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above) 9114 instruct jumpXtnd(iRegX switch_val, o7RegI table) %{ 9115 match(Jump switch_val); 9116 effect(TEMP table); 9117 9118 ins_cost(350); 9119 9120 format %{ "ADD $constanttablebase, $constantoffset, O7\n\t" 9121 "LD [O7 + $switch_val], O7\n\t" 9122 "JUMP O7" %} 9123 ins_encode %{ 9124 // Calculate table address into a register. 9125 Register table_reg; 9126 Register label_reg = O7; 9127 // If we are calculating the size of this instruction don't trust 9128 // zero offsets because they might change when 9129 // MachConstantBaseNode decides to optimize the constant table 9130 // base. 9131 if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) { 9132 table_reg = $constanttablebase; 9133 } else { 9134 table_reg = O7; 9135 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7); 9136 __ add($constanttablebase, con_offset, table_reg); 9137 } 9138 9139 // Jump to base address + switch value 9140 __ ld_ptr(table_reg, $switch_val$$Register, label_reg); 9141 __ jmp(label_reg, G0); 9142 __ delayed()->nop(); 9143 %} 9144 ins_pipe(ialu_reg_reg); 9145 %} 9146 9147 // Direct Branch. Use V8 version with longer range. 9148 instruct branch(label labl) %{ 9149 match(Goto); 9150 effect(USE labl); 9151 9152 size(8); 9153 ins_cost(BRANCH_COST); 9154 format %{ "BA $labl" %} 9155 ins_encode %{ 9156 Label* L = $labl$$label; 9157 __ ba(*L); 9158 __ delayed()->nop(); 9159 %} 9160 ins_pipe(br); 9161 %} 9162 9163 // Direct Branch, short with no delay slot 9164 instruct branch_short(label labl) %{ 9165 match(Goto); 9166 predicate(UseCBCond); 9167 effect(USE labl); 9168 9169 size(4); 9170 ins_cost(BRANCH_COST); 9171 format %{ "BA $labl\t! 
short branch" %} 9172 ins_encode %{ 9173 Label* L = $labl$$label; 9174 assert(__ use_cbcond(*L), "back to back cbcond"); 9175 __ ba_short(*L); 9176 %} 9177 ins_short_branch(1); 9178 ins_avoid_back_to_back(1); 9179 ins_pipe(cbcond_reg_imm); 9180 %} 9181 9182 // Conditional Direct Branch 9183 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{ 9184 match(If cmp icc); 9185 effect(USE labl); 9186 9187 size(8); 9188 ins_cost(BRANCH_COST); 9189 format %{ "BP$cmp $icc,$labl" %} 9190 // Prim = bits 24-22, Secnd = bits 31-30 9191 ins_encode( enc_bp( labl, cmp, icc ) ); 9192 ins_pipe(br_cc); 9193 %} 9194 9195 instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9196 match(If cmp icc); 9197 effect(USE labl); 9198 9199 ins_cost(BRANCH_COST); 9200 format %{ "BP$cmp $icc,$labl" %} 9201 // Prim = bits 24-22, Secnd = bits 31-30 9202 ins_encode( enc_bp( labl, cmp, icc ) ); 9203 ins_pipe(br_cc); 9204 %} 9205 9206 instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{ 9207 match(If cmp pcc); 9208 effect(USE labl); 9209 9210 size(8); 9211 ins_cost(BRANCH_COST); 9212 format %{ "BP$cmp $pcc,$labl" %} 9213 ins_encode %{ 9214 Label* L = $labl$$label; 9215 Assembler::Predict predict_taken = 9216 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9217 9218 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9219 __ delayed()->nop(); 9220 %} 9221 ins_pipe(br_cc); 9222 %} 9223 9224 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{ 9225 match(If cmp fcc); 9226 effect(USE labl); 9227 9228 size(8); 9229 ins_cost(BRANCH_COST); 9230 format %{ "FBP$cmp $fcc,$labl" %} 9231 ins_encode %{ 9232 Label* L = $labl$$label; 9233 Assembler::Predict predict_taken = 9234 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9235 9236 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L); 9237 __ delayed()->nop(); 9238 %} 9239 ins_pipe(br_fcc); 9240 %} 9241 9242 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{ 9243 match(CountedLoopEnd cmp icc); 9244 effect(USE labl); 9245 9246 size(8); 9247 ins_cost(BRANCH_COST); 9248 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9249 // Prim = bits 24-22, Secnd = bits 31-30 9250 ins_encode( enc_bp( labl, cmp, icc ) ); 9251 ins_pipe(br_cc); 9252 %} 9253 9254 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9255 match(CountedLoopEnd cmp icc); 9256 effect(USE labl); 9257 9258 size(8); 9259 ins_cost(BRANCH_COST); 9260 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9261 // Prim = bits 24-22, Secnd = bits 31-30 9262 ins_encode( enc_bp( labl, cmp, icc ) ); 9263 ins_pipe(br_cc); 9264 %} 9265 9266 // Compare and branch instructions 9267 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9268 match(If cmp (CmpI op1 op2)); 9269 effect(USE labl, KILL icc); 9270 9271 size(12); 9272 ins_cost(BRANCH_COST); 9273 format %{ "CMP $op1,$op2\t! int\n\t" 9274 "BP$cmp $labl" %} 9275 ins_encode %{ 9276 Label* L = $labl$$label; 9277 Assembler::Predict predict_taken = 9278 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9279 __ cmp($op1$$Register, $op2$$Register); 9280 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9281 __ delayed()->nop(); 9282 %} 9283 ins_pipe(cmp_br_reg_reg); 9284 %} 9285 9286 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9287 match(If cmp (CmpI op1 op2)); 9288 effect(USE labl, KILL icc); 9289 9290 size(12); 9291 ins_cost(BRANCH_COST); 9292 format %{ "CMP $op1,$op2\t! int\n\t" 9293 "BP$cmp $labl" %} 9294 ins_encode %{ 9295 Label* L = $labl$$label; 9296 Assembler::Predict predict_taken = 9297 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9298 __ cmp($op1$$Register, $op2$$constant); 9299 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9300 __ delayed()->nop(); 9301 %} 9302 ins_pipe(cmp_br_reg_imm); 9303 %} 9304 9305 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9306 match(If cmp (CmpU op1 op2)); 9307 effect(USE labl, KILL icc); 9308 9309 size(12); 9310 ins_cost(BRANCH_COST); 9311 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9312 "BP$cmp $labl" %} 9313 ins_encode %{ 9314 Label* L = $labl$$label; 9315 Assembler::Predict predict_taken = 9316 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9317 __ cmp($op1$$Register, $op2$$Register); 9318 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9319 __ delayed()->nop(); 9320 %} 9321 ins_pipe(cmp_br_reg_reg); 9322 %} 9323 9324 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9325 match(If cmp (CmpU op1 op2)); 9326 effect(USE labl, KILL icc); 9327 9328 size(12); 9329 ins_cost(BRANCH_COST); 9330 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9331 "BP$cmp $labl" %} 9332 ins_encode %{ 9333 Label* L = $labl$$label; 9334 Assembler::Predict predict_taken = 9335 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9336 __ cmp($op1$$Register, $op2$$constant); 9337 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9338 __ delayed()->nop(); 9339 %} 9340 ins_pipe(cmp_br_reg_imm); 9341 %} 9342 9343 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9344 match(If cmp (CmpL op1 op2)); 9345 effect(USE labl, KILL xcc); 9346 9347 size(12); 9348 ins_cost(BRANCH_COST); 9349 format %{ "CMP $op1,$op2\t! long\n\t" 9350 "BP$cmp $labl" %} 9351 ins_encode %{ 9352 Label* L = $labl$$label; 9353 Assembler::Predict predict_taken = 9354 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9355 __ cmp($op1$$Register, $op2$$Register); 9356 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9357 __ delayed()->nop(); 9358 %} 9359 ins_pipe(cmp_br_reg_reg); 9360 %} 9361 9362 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9363 match(If cmp (CmpL op1 op2)); 9364 effect(USE labl, KILL xcc); 9365 9366 size(12); 9367 ins_cost(BRANCH_COST); 9368 format %{ "CMP $op1,$op2\t! long\n\t" 9369 "BP$cmp $labl" %} 9370 ins_encode %{ 9371 Label* L = $labl$$label; 9372 Assembler::Predict predict_taken = 9373 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9374 __ cmp($op1$$Register, $op2$$constant); 9375 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9376 __ delayed()->nop(); 9377 %} 9378 ins_pipe(cmp_br_reg_imm); 9379 %} 9380 9381 // Compare Pointers and branch 9382 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9383 match(If cmp (CmpP op1 op2)); 9384 effect(USE labl, KILL pcc); 9385 9386 size(12); 9387 ins_cost(BRANCH_COST); 9388 format %{ "CMP $op1,$op2\t! ptr\n\t" 9389 "B$cmp $labl" %} 9390 ins_encode %{ 9391 Label* L = $labl$$label; 9392 Assembler::Predict predict_taken = 9393 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9394 __ cmp($op1$$Register, $op2$$Register); 9395 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9396 __ delayed()->nop(); 9397 %} 9398 ins_pipe(cmp_br_reg_reg); 9399 %} 9400 9401 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9402 match(If cmp (CmpP op1 null)); 9403 effect(USE labl, KILL pcc); 9404 9405 size(12); 9406 ins_cost(BRANCH_COST); 9407 format %{ "CMP $op1,0\t! ptr\n\t" 9408 "B$cmp $labl" %} 9409 ins_encode %{ 9410 Label* L = $labl$$label; 9411 Assembler::Predict predict_taken = 9412 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9413 __ cmp($op1$$Register, G0); 9414 // bpr() is not used here since it has shorter distance. 9415 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9416 __ delayed()->nop(); 9417 %} 9418 ins_pipe(cmp_br_reg_reg); 9419 %} 9420 9421 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9422 match(If cmp (CmpN op1 op2)); 9423 effect(USE labl, KILL icc); 9424 9425 size(12); 9426 ins_cost(BRANCH_COST); 9427 format %{ "CMP $op1,$op2\t! compressed ptr\n\t" 9428 "BP$cmp $labl" %} 9429 ins_encode %{ 9430 Label* L = $labl$$label; 9431 Assembler::Predict predict_taken = 9432 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9433 __ cmp($op1$$Register, $op2$$Register); 9434 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9435 __ delayed()->nop(); 9436 %} 9437 ins_pipe(cmp_br_reg_reg); 9438 %} 9439 9440 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9441 match(If cmp (CmpN op1 null)); 9442 effect(USE labl, KILL icc); 9443 9444 size(12); 9445 ins_cost(BRANCH_COST); 9446 format %{ "CMP $op1,0\t! compressed ptr\n\t" 9447 "BP$cmp $labl" %} 9448 ins_encode %{ 9449 Label* L = $labl$$label; 9450 Assembler::Predict predict_taken = 9451 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9452 __ cmp($op1$$Register, G0); 9453 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9454 __ delayed()->nop(); 9455 %} 9456 ins_pipe(cmp_br_reg_reg); 9457 %} 9458 9459 // Loop back branch 9460 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9461 match(CountedLoopEnd cmp (CmpI op1 op2)); 9462 effect(USE labl, KILL icc); 9463 9464 size(12); 9465 ins_cost(BRANCH_COST); 9466 format %{ "CMP $op1,$op2\t! int\n\t" 9467 "BP$cmp $labl\t! 
Loop end" %} 9468 ins_encode %{ 9469 Label* L = $labl$$label; 9470 Assembler::Predict predict_taken = 9471 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9472 __ cmp($op1$$Register, $op2$$Register); 9473 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9474 __ delayed()->nop(); 9475 %} 9476 ins_pipe(cmp_br_reg_reg); 9477 %} 9478 9479 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9480 match(CountedLoopEnd cmp (CmpI op1 op2)); 9481 effect(USE labl, KILL icc); 9482 9483 size(12); 9484 ins_cost(BRANCH_COST); 9485 format %{ "CMP $op1,$op2\t! int\n\t" 9486 "BP$cmp $labl\t! Loop end" %} 9487 ins_encode %{ 9488 Label* L = $labl$$label; 9489 Assembler::Predict predict_taken = 9490 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9491 __ cmp($op1$$Register, $op2$$constant); 9492 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9493 __ delayed()->nop(); 9494 %} 9495 ins_pipe(cmp_br_reg_imm); 9496 %} 9497 9498 // Short compare and branch instructions 9499 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9500 match(If cmp (CmpI op1 op2)); 9501 predicate(UseCBCond); 9502 effect(USE labl, KILL icc); 9503 9504 size(4); 9505 ins_cost(BRANCH_COST); 9506 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %} 9507 ins_encode %{ 9508 Label* L = $labl$$label; 9509 assert(__ use_cbcond(*L), "back to back cbcond"); 9510 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9511 %} 9512 ins_short_branch(1); 9513 ins_avoid_back_to_back(1); 9514 ins_pipe(cbcond_reg_reg); 9515 %} 9516 9517 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9518 match(If cmp (CmpI op1 op2)); 9519 predicate(UseCBCond); 9520 effect(USE labl, KILL icc); 9521 9522 size(4); 9523 ins_cost(BRANCH_COST); 9524 format %{ "CWB$cmp $op1,$op2,$labl\t! 
int" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
  match(If cmp (CmpU op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL xcc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
  match(If cmp (CmpL op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL xcc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

// Compare Pointers and branch
instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL pcc);

  size(4);
  ins_cost(BRANCH_COST);
#ifdef _LP64
  format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
#else
  format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
#endif
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
  match(If cmp (CmpP op1 null));
  predicate(UseCBCond);
  effect(USE labl, KILL pcc);

  size(4);
  ins_cost(BRANCH_COST);
#ifdef _LP64
  format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
#else
  format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
#endif
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  // Fixed: second operand was written "op2" (missing '$'), so the debug
  // format printed the literal text "op2" instead of the actual operand.
  format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
  match(If cmp (CmpN op1 null));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

// Loop back branch
instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_reg);
%}

instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
  match(CountedLoopEnd cmp (CmpI op1 op2));
  predicate(UseCBCond);
  effect(USE labl, KILL icc);

  size(4);
  ins_cost(BRANCH_COST);
  format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
  ins_encode %{
    Label* L = $labl$$label;
    assert(__ use_cbcond(*L), "back to back cbcond");
    __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
  %}
  ins_short_branch(1);
  ins_avoid_back_to_back(1);
  ins_pipe(cbcond_reg_imm);
%}

// Branch-on-register tests all 64 bits. We assume that values
// in 64-bit registers always remain zero or sign extended
// unless our code munges the high bits. Interrupts can chop
// the high order bits to zero or sign at any time.
// Branch directly on a register's value compared against zero (enc_bpr);
// no condition-code register is consumed by these forms.
instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
  match(If cmp (CmpI op1 zero));
  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BR$cmp $op1,$labl" %}
  ins_encode( enc_bpr( labl, cmp, op1 ) );
  ins_pipe(br_reg);
%}

instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
  match(If cmp (CmpP op1 null));
  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BR$cmp $op1,$labl" %}
  ins_encode( enc_bpr( labl, cmp, op1 ) );
  ins_pipe(br_reg);
%}

instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
  match(If cmp (CmpL op1 zero));
  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BR$cmp $op1,$labl" %}
  ins_encode( enc_bpr( labl, cmp, op1 ) );
  ins_pipe(br_reg);
%}


// ============================================================================
// Long Compare
//
// Currently we hold longs in 2 registers. Comparing such values efficiently
// is tricky. The flavor of compare used depends on whether we are testing
// for LT, LE, or EQ. For a simple LT test we can check just the sign bit.
// The GE test is the negated LT test. The LE test can be had by commuting
// the operands (yielding a GE test) and then negating; negate again for the
// GT test. The EQ test is done by ORcc'ing the high and low halves, and the
// NE test is negated from that.

// Due to a shortcoming in the ADLC, it mixes up expressions like:
// (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the
// difference between 'Y' and '0L'. The tree-matches for the CmpI sections
// are collapsed internally in the ADLC's dfa-gen code. The match for
// (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
// foo match ends up with the wrong leaf. One fix is to not match both
// reg-reg and reg-zero forms of long-compare. This is unfortunate because
// both forms beat the trinary form of long-compare and both are very useful
// on Intel which has so few registers.

// Branch on the long (xcc) condition codes; static prediction is chosen
// from branch direction (backward branch => predict taken).
instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $xcc,$labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
    __ delayed()->nop();
  %}
  ins_pipe(br_cc);
%}

// Manifest a CmpL3 result in an integer register. Very painful.
// This is the test to avoid.
instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
  match(Set dst (CmpL3 src1 src2) );
  effect( KILL ccr );
  ins_cost(6*DEFAULT_COST);
  size(24);
  format %{ "CMP $src1,$src2\t\t! long\n"
          "\tBLT,a,pn done\n"
          "\tMOV -1,$dst\t! delay slot\n"
          "\tBGT,a,pn done\n"
          "\tMOV 1,$dst\t! delay slot\n"
          "\tCLR $dst\n"
          "done:" %}
  ins_encode( cmpl_flag(src1,src2,dst) );
  ins_pipe(cmpL_reg);
%}

// Conditional move
// All of the cmov*L_* forms below move on the long (xcc) condition codes.
instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x101);
  format %{ "FMOVS$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x102);
  format %{ "FMOVD$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

// ============================================================================
// Safepoint Instruction
instruct safePoint_poll(iRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  size(4);
#ifdef _LP64
  format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
#else
  format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
#endif
  ins_encode %{
    // Load from the polling page into G0; the access is identified by its
    // poll_type relocation.
    __ relocate(relocInfo::poll_type);
    __ ld_ptr($poll$$Register, 0, G0);
  %}
  ins_pipe(loadPollP);
%}

// ============================================================================
// Call Instructions
// Call Java Static Instruction
instruct CallStaticJavaDirect( method meth ) %{
  match(CallStaticJava);
  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);

  size(8);
  ins_cost(CALL_COST);
  format %{ "CALL,static ; NOP ==> " %}
  ins_encode( Java_Static_Call( meth ), call_epilog );
  ins_pipe(simple_call);
%}

// Call Java Static Instruction (method handle version)
instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth, KILL l7_mh_SP_save);

  size(16);
  ins_cost(CALL_COST);
  format %{ "CALL,static/MethodHandle" %}
  ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
  ins_pipe(simple_call);
%}

// Call Java Dynamic Instruction
instruct CallDynamicJavaDirect( method meth ) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(CALL_COST);
  format %{ "SET (empty),R_G5\n\t"
            "CALL,dynamic ; NOP ==> " %}
  ins_encode( Java_Dynamic_Call( meth ), call_epilog );
  ins_pipe(call);
%}

// Call Runtime Instruction
instruct CallRuntimeDirect(method meth, l7RegP l7) %{
  match(CallRuntime);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog, adjust_long_from_native_call );
  ins_pipe(simple_call);
%}

// Call runtime without safepoint - same as CallRuntime
instruct CallLeafDirect(method meth, l7RegP l7) %{
  match(CallLeaf);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_pipe(simple_call);
%}

// Call runtime without safepoint - same as CallLeaf
instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
  match(CallLeafNoFP);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf nofp" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_pipe(simple_call);
%}

// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
  match(TailCall jump_target method_oop );

  ins_cost(CALL_COST);
  format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
  ins_encode(form_jmpl(jump_target));
  ins_pipe(tail_call);
%}


// Return Instruction
instruct Ret() %{
  match(Return);

  // The epilogue node did the ret already.
  size(0);
  format %{ "! return" %}
  ins_encode();
  ins_pipe(empty);
%}


// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(CALL_COST);
  format %{ "! discard R_O7\n\t"
            "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
  ins_encode(form_jmpl_set_exception_pc(jump_target));
  // opcode(Assembler::jmpl_op3, Assembler::arith_op);
  // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
  // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
  ins_pipe(tail_call);
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( o0RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  size(0);
  // use the following format syntax
  format %{ "! exception oop is in R_O0; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}


// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);
  ins_cost(CALL_COST);

  // use the following format syntax
  format %{ "Jmp rethrow_stub" %}
  ins_encode(enc_rethrow);
  ins_pipe(tail_call);
%}


// Die now
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(CALL_COST);

  size(4);
  // Use the following format syntax
  format %{ "ILLTRAP ; ShouldNotReachHere" %}
  ins_encode( form2_illtrap() );
  ins_pipe(tail_call);
%}

// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// not zero for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
  match(Set index (PartialSubtypeCheck sub super));
  effect( KILL pcc, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_pipe(partial_subtype_check_pipe);
%}

// Same check, but consumed directly by a CmpP against zero: only the
// condition codes are kept and the index result is killed.
instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
  match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
  effect( KILL idx, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_pipe(partial_subtype_check_pipe);
%}


// ============================================================================
// inlined locking and unlocking

instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastLock object box));

  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Lock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}


instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastUnlock object box));
  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}

// The encodings are generic.
// Zero an array with a software doubleword loop; used when BIS block
// zeroing is not profitable for this node (see use_block_zeroing).
instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
  predicate(!use_block_zeroing(n->in(2)) );
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, KILL ccr);
  ins_cost(300);
  format %{ "MOV $cnt,$temp\n"
            "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
            " BRge loop\t\t! Clearing loop\n"
            " STX G0,[$base+$temp]\t! delay slot" %}

  ins_encode %{
    // Compiler ensures base is doubleword aligned and cnt is count of doublewords
    Register nof_bytes_arg = $cnt$$Register;
    Register nof_bytes_tmp = $temp$$Register;
    Register base_pointer_arg = $base$$Register;

    Label loop;
    __ mov(nof_bytes_arg, nof_bytes_tmp);

    // Loop and clear, walking backwards through the array.
    // nof_bytes_tmp (if >0) is always the number of bytes to zero
    __ bind(loop);
    __ deccc(nof_bytes_tmp, 8);
    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
    // %%%% this mini-loop must not cross a cache boundary!
  %}
  ins_pipe(long_memory_op);
%}

// Zero an array with BIS (block-initializing stores); no temp register.
instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to = $base$$Register;
    Register count = $cnt$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing (temp is not used).
    __ bis_zeroing(to, count, G0, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

// BIS zeroing variant with a temp register, for when BlockZeroingLowLimit
// does not fit in a simm13 immediate.
instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to = $base$$Register;
    Register count = $cnt$$Register;
    Register temp = $tmp$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing
    __ bis_zeroing(to, count, temp, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                        o7RegI tmp, flagsReg ccr) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
  ins_cost(300);
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
  ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) );
  ins_pipe(long_memory_op);
%}

instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
                       o7RegI tmp, flagsReg ccr) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
  ins_cost(300);
  format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %}
  ins_encode( enc_String_Equals(str1, str2, cnt, result) );
  ins_pipe(long_memory_op);
%}

instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
                      o7RegI tmp2, flagsReg ccr) %{
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
  ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result));
  ins_pipe(long_memory_op);
%}


//---------- Zeros Count Instructions ------------------------------------------

instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosI src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // return (WORDBITS - popc(x));
  format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t"
            "SRL $src,0,$dst\t! 32-bit zero extend\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 32,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srl(Rsrc, 1, Rtmp);
    __ srl(Rsrc, 0, Rdst);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 2, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 4, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 8, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 16, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerInt, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosL src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // x |= (x >> 32);
  // return (WORDBITS - popc(x));
  format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t"
            "OR $src,$tmp,$dst\n\t"
            "SRLX $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,32,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 64,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srlx(Rsrc, 1, Rtmp);
    __ or3( Rsrc, Rtmp, Rdst);
    __ srlx(Rdst, 2, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 4, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 8, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 16, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 32, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerLong, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{
  predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosI src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "SRL $dst,R_G0,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ srl(Rdst, G0, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
  predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}


//---------- Population Count Instructions -------------------------------------

instruct popCountI(iRegIsafe dst, iRegI src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));

  format %{ "SRL $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t"
            "POPC $dst, $dst" %}
  ins_encode %{
    __ srl($src$$Register, G0, $dst$$Register);
    __ popc($dst$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegIsafe dst, iRegL src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));

  format %{ "POPC $src, $dst" %}
  ins_encode %{
    __ popc($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}


// ============================================================================
//------------Bytes reverse--------------------------------------------------

instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesI src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
  match(Set dst (ReverseBytesL src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesUS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

// Load Integer reversed byte order
instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesI (LoadI src)));

  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(4);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - aligned and reversed
instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesL (LoadL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load unsigned short / char reversed byte order
instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesUS (LoadUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load short reversed byte order
instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesS (LoadS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Store Integer reversed byte order
instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreI dst (ReverseBytesI src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store Long reversed byte order
instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
  match(Set dst (StoreL dst (ReverseBytesL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store unsigned short/char reversed byte order
instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store short reversed byte order
instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// ====================VECTOR INSTRUCTIONS=====================================

// Load Aligned Packed values into a Double Register
instruct loadV8(regD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %}
  ins_encode %{
    __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(floadD_mem);
%}

// Store Vector in Double register to memory
instruct storeV8(memory mem, regD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF $src,$mem\t! 
store vector (8 bytes)" %} 10609 ins_encode %{ 10610 __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address); 10611 %} 10612 ins_pipe(fstoreD_mem_reg); 10613 %} 10614 10615 // Store Zero into vector in memory 10616 instruct storeV8B_zero(memory mem, immI0 zero) %{ 10617 predicate(n->as_StoreVector()->memory_size() == 8); 10618 match(Set mem (StoreVector mem (ReplicateB zero))); 10619 ins_cost(MEMORY_REF_COST); 10620 size(4); 10621 format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %} 10622 ins_encode %{ 10623 __ stx(G0, $mem$$Address); 10624 %} 10625 ins_pipe(fstoreD_mem_zero); 10626 %} 10627 10628 instruct storeV4S_zero(memory mem, immI0 zero) %{ 10629 predicate(n->as_StoreVector()->memory_size() == 8); 10630 match(Set mem (StoreVector mem (ReplicateS zero))); 10631 ins_cost(MEMORY_REF_COST); 10632 size(4); 10633 format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %} 10634 ins_encode %{ 10635 __ stx(G0, $mem$$Address); 10636 %} 10637 ins_pipe(fstoreD_mem_zero); 10638 %} 10639 10640 instruct storeV2I_zero(memory mem, immI0 zero) %{ 10641 predicate(n->as_StoreVector()->memory_size() == 8); 10642 match(Set mem (StoreVector mem (ReplicateI zero))); 10643 ins_cost(MEMORY_REF_COST); 10644 size(4); 10645 format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %} 10646 ins_encode %{ 10647 __ stx(G0, $mem$$Address); 10648 %} 10649 ins_pipe(fstoreD_mem_zero); 10650 %} 10651 10652 instruct storeV2F_zero(memory mem, immF0 zero) %{ 10653 predicate(n->as_StoreVector()->memory_size() == 8); 10654 match(Set mem (StoreVector mem (ReplicateF zero))); 10655 ins_cost(MEMORY_REF_COST); 10656 size(4); 10657 format %{ "STX $zero,$mem\t! 
store zero vector (2 floats)" %} 10658 ins_encode %{ 10659 __ stx(G0, $mem$$Address); 10660 %} 10661 ins_pipe(fstoreD_mem_zero); 10662 %} 10663 10664 // Replicate scalar to packed byte values into Double register 10665 instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10666 predicate(n->as_Vector()->length() == 8 && UseVIS >= 3); 10667 match(Set dst (ReplicateB src)); 10668 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10669 format %{ "SLLX $src,56,$tmp\n\t" 10670 "SRLX $tmp, 8,$tmp2\n\t" 10671 "OR $tmp,$tmp2,$tmp\n\t" 10672 "SRLX $tmp,16,$tmp2\n\t" 10673 "OR $tmp,$tmp2,$tmp\n\t" 10674 "SRLX $tmp,32,$tmp2\n\t" 10675 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t" 10676 "MOVXTOD $tmp,$dst\t! MoveL2D" %} 10677 ins_encode %{ 10678 Register Rsrc = $src$$Register; 10679 Register Rtmp = $tmp$$Register; 10680 Register Rtmp2 = $tmp2$$Register; 10681 __ sllx(Rsrc, 56, Rtmp); 10682 __ srlx(Rtmp, 8, Rtmp2); 10683 __ or3 (Rtmp, Rtmp2, Rtmp); 10684 __ srlx(Rtmp, 16, Rtmp2); 10685 __ or3 (Rtmp, Rtmp2, Rtmp); 10686 __ srlx(Rtmp, 32, Rtmp2); 10687 __ or3 (Rtmp, Rtmp2, Rtmp); 10688 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10689 %} 10690 ins_pipe(ialu_reg); 10691 %} 10692 10693 // Replicate scalar to packed byte values into Double stack 10694 instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10695 predicate(n->as_Vector()->length() == 8 && UseVIS < 3); 10696 match(Set dst (ReplicateB src)); 10697 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10698 format %{ "SLLX $src,56,$tmp\n\t" 10699 "SRLX $tmp, 8,$tmp2\n\t" 10700 "OR $tmp,$tmp2,$tmp\n\t" 10701 "SRLX $tmp,16,$tmp2\n\t" 10702 "OR $tmp,$tmp2,$tmp\n\t" 10703 "SRLX $tmp,32,$tmp2\n\t" 10704 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t" 10705 "STX $tmp,$dst\t! 
regL to stkD" %} 10706 ins_encode %{ 10707 Register Rsrc = $src$$Register; 10708 Register Rtmp = $tmp$$Register; 10709 Register Rtmp2 = $tmp2$$Register; 10710 __ sllx(Rsrc, 56, Rtmp); 10711 __ srlx(Rtmp, 8, Rtmp2); 10712 __ or3 (Rtmp, Rtmp2, Rtmp); 10713 __ srlx(Rtmp, 16, Rtmp2); 10714 __ or3 (Rtmp, Rtmp2, Rtmp); 10715 __ srlx(Rtmp, 32, Rtmp2); 10716 __ or3 (Rtmp, Rtmp2, Rtmp); 10717 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10718 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10719 %} 10720 ins_pipe(ialu_reg); 10721 %} 10722 10723 // Replicate scalar constant to packed byte values in Double register 10724 instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{ 10725 predicate(n->as_Vector()->length() == 8); 10726 match(Set dst (ReplicateB con)); 10727 effect(KILL tmp); 10728 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %} 10729 ins_encode %{ 10730 // XXX This is a quick fix for 6833573. 10731 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister); 10732 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register); 10733 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10734 %} 10735 ins_pipe(loadConFD); 10736 %} 10737 10738 // Replicate scalar to packed char/short values into Double register 10739 instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10740 predicate(n->as_Vector()->length() == 4 && UseVIS >= 3); 10741 match(Set dst (ReplicateS src)); 10742 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10743 format %{ "SLLX $src,48,$tmp\n\t" 10744 "SRLX $tmp,16,$tmp2\n\t" 10745 "OR $tmp,$tmp2,$tmp\n\t" 10746 "SRLX $tmp,32,$tmp2\n\t" 10747 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t" 10748 "MOVXTOD $tmp,$dst\t! 
MoveL2D" %} 10749 ins_encode %{ 10750 Register Rsrc = $src$$Register; 10751 Register Rtmp = $tmp$$Register; 10752 Register Rtmp2 = $tmp2$$Register; 10753 __ sllx(Rsrc, 48, Rtmp); 10754 __ srlx(Rtmp, 16, Rtmp2); 10755 __ or3 (Rtmp, Rtmp2, Rtmp); 10756 __ srlx(Rtmp, 32, Rtmp2); 10757 __ or3 (Rtmp, Rtmp2, Rtmp); 10758 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10759 %} 10760 ins_pipe(ialu_reg); 10761 %} 10762 10763 // Replicate scalar to packed char/short values into Double stack 10764 instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10765 predicate(n->as_Vector()->length() == 4 && UseVIS < 3); 10766 match(Set dst (ReplicateS src)); 10767 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10768 format %{ "SLLX $src,48,$tmp\n\t" 10769 "SRLX $tmp,16,$tmp2\n\t" 10770 "OR $tmp,$tmp2,$tmp\n\t" 10771 "SRLX $tmp,32,$tmp2\n\t" 10772 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t" 10773 "STX $tmp,$dst\t! regL to stkD" %} 10774 ins_encode %{ 10775 Register Rsrc = $src$$Register; 10776 Register Rtmp = $tmp$$Register; 10777 Register Rtmp2 = $tmp2$$Register; 10778 __ sllx(Rsrc, 48, Rtmp); 10779 __ srlx(Rtmp, 16, Rtmp2); 10780 __ or3 (Rtmp, Rtmp2, Rtmp); 10781 __ srlx(Rtmp, 32, Rtmp2); 10782 __ or3 (Rtmp, Rtmp2, Rtmp); 10783 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10784 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10785 %} 10786 ins_pipe(ialu_reg); 10787 %} 10788 10789 // Replicate scalar constant to packed char/short values in Double register 10790 instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{ 10791 predicate(n->as_Vector()->length() == 4); 10792 match(Set dst (ReplicateS con)); 10793 effect(KILL tmp); 10794 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %} 10795 ins_encode %{ 10796 // XXX This is a quick fix for 6833573. 
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed int values into Double register
// (VIS3: build replicated value in an integer register, then MOVXTOD).
instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 2 && UseVIS >= 3);
  match(Set dst (ReplicateI src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,32,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
            "MOVXTOD $tmp,$dst\t! MoveL2D" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 32, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed int values into Double stack
// (pre-VIS3: result goes through a stack slot via STX).
instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 2 && UseVIS < 3);
  match(Set dst (ReplicateI src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,32,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
            "STX $tmp,$dst\t! regL to stkD" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 32, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ set ($dst$$disp + STACK_BIAS, Rtmp2);
    __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar constant to packed int values in Double register.
// (Comment fixed: operand is a general immI, not only zero, and the
// replicated value is loaded from the constant table.)
instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed float values into Double stack:
// store the float twice, to the high and low halves of the slot.
instruct Repl2F_stk(stackSlotD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(MEMORY_REF_COST*2);
  format %{ "STF $src,$dst.hi\t! packed2F\n\t"
            "STF $src,$dst.lo" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

// Replicate scalar constant to packed float values in Double register.
// (Comment fixed: operand is a general immF, not only zero; the value is
// loaded from the constant table.)
instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

//----------PEEPHOLE RULES-----------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.
//
// peepmatch ( root_instr_name [preceding_instruction]* );
//
// peepconstraint %{
// (instruction_number.operand_name relational_op instruction_number.operand_name
//  [, ...] );
// // instruction numbers are zero-based using left to right order in peepmatch
//
// peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
// // provide an instruction_number.operand_name for each operand that appears
// // in the replacement instruction's match rule
//
// ---------VM FLAGS---------------------------------------------------------
//
// All peephole optimizations can be turned off using -XX:-OptoPeephole
//
// Each peephole rule is given an identifying number starting with zero and
// increasing by one in the order seen by the parser. An individual peephole
// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
// on the command-line.
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only match adjacent instructions in same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == EAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(eRegI dst, eRegI src) %{
//   match(Set dst (CopyI src));
// %}
//
// instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
//   match(Set dst (AddI dst src));
//   effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
//   // increment preceded by register-register move
//   peepmatch ( incI_eReg movI );
//   // require that the destination register of the increment
//   // match the destination register of the move
//   peepconstraint ( 0.dst == 1.dst );
//   // construct a replacement instruction that sets
//   // the destination to ( move's source register + one )
//   peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) );
// %}
//

// // Change load of spilled value to only a spill
// instruct storeI(memory mem, eRegI src) %{
//   match(Set mem (StoreI mem src));
// %}
//
// instruct loadI(eRegI dst, memory mem) %{
//   match(Set dst (LoadI mem));
// %}
//
// peephole %{
//   peepmatch ( loadI storeI );
//   peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
//   peepreplace ( storeI( 1.mem 1.mem 1.src ) );
// %}

//----------SMARTSPILL RULES---------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.
//
// SPARC will probably not have any of these rules due to RISC instruction set.

//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.