1 // 2 // Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. 3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 // 5 // This code is free software; you can redistribute it and/or modify it 6 // under the terms of the GNU General Public License version 2 only, as 7 // published by the Free Software Foundation. 8 // 9 // This code is distributed in the hope that it will be useful, but WITHOUT 10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 // version 2 for more details (a copy is included in the LICENSE file that 13 // accompanied this code). 14 // 15 // You should have received a copy of the GNU General Public License version 16 // 2 along with this work; if not, write to the Free Software Foundation, 17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 // 19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 // or visit www.oracle.com if you need additional information or have any 21 // questions. 22 // 23 // 24 25 // SPARC Architecture Description File 26 27 //----------REGISTER DEFINITION BLOCK------------------------------------------ 28 // This information is used by the matcher and the register allocator to 29 // describe individual registers and classes of registers within the target 30 // archtecture. 31 register %{ 32 //----------Architecture Description Register Definitions---------------------- 33 // General Registers 34 // "reg_def" name ( register save type, C convention save type, 35 // ideal register type, encoding, vm name ); 36 // Register Save Types: 37 // 38 // NS = No-Save: The register allocator assumes that these registers 39 // can be used without saving upon entry to the method, & 40 // that they do not need to be saved at call sites. 
41 // 42 // SOC = Save-On-Call: The register allocator assumes that these registers 43 // can be used without saving upon entry to the method, 44 // but that they must be saved at call sites. 45 // 46 // SOE = Save-On-Entry: The register allocator assumes that these registers 47 // must be saved before using them upon entry to the 48 // method, but they do not need to be saved at call 49 // sites. 50 // 51 // AS = Always-Save: The register allocator assumes that these registers 52 // must be saved before using them upon entry to the 53 // method, & that they must be saved at call sites. 54 // 55 // Ideal Register Type is used to determine how to save & restore a 56 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get 57 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. 58 // 59 // The encoding number is the actual bit-pattern placed into the opcodes. 60 61 62 // ---------------------------- 63 // Integer/Long Registers 64 // ---------------------------- 65 66 // Need to expose the hi/lo aspect of 64-bit registers 67 // This register set is used for both the 64-bit build and 68 // the 32-bit build with 1-register longs. 
// Global Registers 0-7
//
// Each 64-bit integer register is exposed to the allocator as an
// aligned pair: R_Xn names the low word (encoding 0-31, the hardware
// register number) and R_XnH names the high word (encoding 128+).
// Save types follow the SPARC/Solaris conventions documented above:
//   G0          - NS:  hardwired zero (see int_reg exclusions below)
//   G2, G6, G7  - NS:  reserved (TLS / ABI) -- never allocated freely
//   G1, G3-G5   - SOC: scratch globals, caller-saved
reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next());
reg_def R_G0 ( NS, NS, Op_RegI,  0, G0->as_VMReg());
reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next());
reg_def R_G1 (SOC, SOC, Op_RegI,  1, G1->as_VMReg());
reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next());
reg_def R_G2 ( NS, NS, Op_RegI,  2, G2->as_VMReg());
reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next());
reg_def R_G3 (SOC, SOC, Op_RegI,  3, G3->as_VMReg());
reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next());
reg_def R_G4 (SOC, SOC, Op_RegI,  4, G4->as_VMReg());
reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next());
reg_def R_G5 (SOC, SOC, Op_RegI,  5, G5->as_VMReg());
reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next());
reg_def R_G6 ( NS, NS, Op_RegI,  6, G6->as_VMReg());
reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next());
reg_def R_G7 ( NS, NS, Op_RegI,  7, G7->as_VMReg());

// Output Registers 0-7
//
// O0-O5 carry outgoing arguments/results and are caller-saved (SOC).
// O6 is the stack pointer (named R_SP here, NS -- never reallocated);
// O7 holds the return PC around calls but is otherwise a SOC temp.
reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next());
reg_def R_O0 (SOC, SOC, Op_RegI,  8, O0->as_VMReg());
reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next());
reg_def R_O1 (SOC, SOC, Op_RegI,  9, O1->as_VMReg());
reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next());
reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg());
reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next());
reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg());
reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next());
reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg());
reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next());
reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg());
reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next());
reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg());
reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next());
reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg());
105 106 // Local Registers 0-7 107 reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next()); 108 reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg()); 109 reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next()); 110 reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg()); 111 reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next()); 112 reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg()); 113 reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next()); 114 reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg()); 115 reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next()); 116 reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg()); 117 reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next()); 118 reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg()); 119 reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next()); 120 reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg()); 121 reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next()); 122 reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg()); 123 124 // Input Registers 0-7 125 reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next()); 126 reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg()); 127 reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next()); 128 reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg()); 129 reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next()); 130 reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg()); 131 reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next()); 132 reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg()); 133 reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next()); 134 reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg()); 135 reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next()); 136 reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg()); 137 reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next()); 138 reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg()); 139 reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next()); 140 reg_def R_I7 ( NS, NS, Op_RegI, 31, 
I7->as_VMReg()); 141 142 // ---------------------------- 143 // Float/Double Registers 144 // ---------------------------- 145 146 // Float Registers 147 reg_def R_F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()); 148 reg_def R_F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()); 149 reg_def R_F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()); 150 reg_def R_F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()); 151 reg_def R_F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()); 152 reg_def R_F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()); 153 reg_def R_F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()); 154 reg_def R_F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()); 155 reg_def R_F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()); 156 reg_def R_F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()); 157 reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg()); 158 reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg()); 159 reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg()); 160 reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg()); 161 reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg()); 162 reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg()); 163 reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg()); 164 reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg()); 165 reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg()); 166 reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg()); 167 reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg()); 168 reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg()); 169 reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg()); 170 reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg()); 171 reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg()); 172 reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg()); 173 reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg()); 174 reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg()); 175 reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg()); 176 reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg()); 177 reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg()); 178 
reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg()); 179 180 // Double Registers 181 // The rules of ADL require that double registers be defined in pairs. 182 // Each pair must be two 32-bit values, but not necessarily a pair of 183 // single float registers. In each pair, ADLC-assigned register numbers 184 // must be adjacent, with the lower number even. Finally, when the 185 // CPU stores such a register pair to memory, the word associated with 186 // the lower ADLC-assigned number must be stored to the lower address. 187 188 // These definitions specify the actual bit encodings of the sparc 189 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp 190 // wants 0-63, so we have to convert every time we want to use fp regs 191 // with the macroassembler, using reg_to_DoubleFloatRegister_object(). 192 // 255 is a flag meaning "don't go here". 193 // I believe we can't handle callee-save doubles D32 and up until 194 // the place in the sparc stack crawler that asserts on the 255 is 195 // fixed up. 
196 reg_def R_D32 (SOC, SOC, Op_RegD, 1, F32->as_VMReg()); 197 reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next()); 198 reg_def R_D34 (SOC, SOC, Op_RegD, 3, F34->as_VMReg()); 199 reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next()); 200 reg_def R_D36 (SOC, SOC, Op_RegD, 5, F36->as_VMReg()); 201 reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next()); 202 reg_def R_D38 (SOC, SOC, Op_RegD, 7, F38->as_VMReg()); 203 reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next()); 204 reg_def R_D40 (SOC, SOC, Op_RegD, 9, F40->as_VMReg()); 205 reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next()); 206 reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg()); 207 reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next()); 208 reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg()); 209 reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next()); 210 reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg()); 211 reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next()); 212 reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg()); 213 reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next()); 214 reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg()); 215 reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next()); 216 reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg()); 217 reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next()); 218 reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg()); 219 reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next()); 220 reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg()); 221 reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next()); 222 reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg()); 223 reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next()); 224 reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg()); 225 reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next()); 226 reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg()); 227 reg_def R_D62x(SOC, 
SOC, Op_RegD,255, F62->as_VMReg()->next()); 228 229 230 // ---------------------------- 231 // Special Registers 232 // Condition Codes Flag Registers 233 // I tried to break out ICC and XCC but it's not very pretty. 234 // Every Sparc instruction which defs/kills one also kills the other. 235 // Hence every compare instruction which defs one kind of flags ends 236 // up needing a kill of the other. 237 reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 238 239 reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 240 reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad()); 241 reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad()); 242 reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad()); 243 244 // ---------------------------- 245 // Specify the enum values for the registers. These enums are only used by the 246 // OptoReg "class". We can convert these enum values at will to VMReg when needed 247 // for visibility to the rest of the vm. The order of this enum influences the 248 // register allocator so having the freedom to set this order and not be stuck 249 // with the order that is natural for the rest of the vm is worth it. 250 alloc_class chunk0( 251 R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H, 252 R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H, 253 R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H, 254 R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H); 255 256 // Note that a register is not allocatable unless it is also mentioned 257 // in a widely-used reg_class below. Thus, R_G7 and R_G0 are outside i_reg. 258 259 alloc_class chunk1( 260 // The first registers listed here are those most likely to be used 261 // as temporaries. 
We move F0..F7 away from the front of the list, 262 // to reduce the likelihood of interferences with parameters and 263 // return values. Likewise, we avoid using F0/F1 for parameters, 264 // since they are used for return values. 265 // This FPU fine-tuning is worth about 1% on the SPEC geomean. 266 R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 267 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23, 268 R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31, 269 R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values 270 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x, 271 R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 272 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x, 273 R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x); 274 275 alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3); 276 277 //----------Architecture Description Register Classes-------------------------- 278 // Several register classes are automatically defined based upon information in 279 // this architecture description. 280 // 1) reg_class inline_cache_reg ( as defined in frame section ) 281 // 2) reg_class interpreter_method_oop_reg ( as defined in frame section ) 282 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ ) 283 // 284 285 // G0 is not included in integer class since it has special meaning. 286 reg_class g0_reg(R_G0); 287 288 // ---------------------------- 289 // Integer Register Classes 290 // ---------------------------- 291 // Exclusions from i_reg: 292 // R_G0: hardwired zero 293 // R_G2: reserved by HotSpot to the TLS register (invariant within Java) 294 // R_G6: reserved by Solaris ABI to tools 295 // R_G7: reserved by Solaris ABI to libthread 296 // R_O7: Used as a temp in many encodings 297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 298 299 // Class for all integer registers, except the G registers. 
This is used for 300 // encodings which use G registers as temps. The regular inputs to such 301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator 302 // will not put an input into a temp register. 303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 304 305 reg_class g1_regI(R_G1); 306 reg_class g3_regI(R_G3); 307 reg_class g4_regI(R_G4); 308 reg_class o0_regI(R_O0); 309 reg_class o7_regI(R_O7); 310 311 // ---------------------------- 312 // Pointer Register Classes 313 // ---------------------------- 314 #ifdef _LP64 315 // 64-bit build means 64-bit pointers means hi/lo pairs 316 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 317 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 318 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 319 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 320 // Lock encodings use G3 and G4 internally 321 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5, 322 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 323 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 324 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 325 // Special class for storeP instructions, which can store SP or RPC to TLS. 326 // It is also used for memory addressing, allowing direct TLS addressing. 
327 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 328 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP, 329 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 330 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP ); 331 // R_L7 is the lowest-priority callee-save (i.e., NS) register 332 // We use it to save R_G2 across calls out of Java. 333 reg_class l7_regP(R_L7H,R_L7); 334 335 // Other special pointer regs 336 reg_class g1_regP(R_G1H,R_G1); 337 reg_class g2_regP(R_G2H,R_G2); 338 reg_class g3_regP(R_G3H,R_G3); 339 reg_class g4_regP(R_G4H,R_G4); 340 reg_class g5_regP(R_G5H,R_G5); 341 reg_class i0_regP(R_I0H,R_I0); 342 reg_class o0_regP(R_O0H,R_O0); 343 reg_class o1_regP(R_O1H,R_O1); 344 reg_class o2_regP(R_O2H,R_O2); 345 reg_class o7_regP(R_O7H,R_O7); 346 347 #else // _LP64 348 // 32-bit build means 32-bit pointers means 1 register. 349 reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5, 350 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 351 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 352 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 353 // Lock encodings use G3 and G4 internally 354 reg_class lock_ptr_reg(R_G1, R_G5, 355 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 356 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 357 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 358 // Special class for storeP instructions, which can store SP or RPC to TLS. 359 // It is also used for memory addressing, allowing direct TLS addressing. 360 reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5, 361 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP, 362 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 363 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP); 364 // R_L7 is the lowest-priority callee-save (i.e., NS) register 365 // We use it to save R_G2 across calls out of Java. 
366 reg_class l7_regP(R_L7); 367 368 // Other special pointer regs 369 reg_class g1_regP(R_G1); 370 reg_class g2_regP(R_G2); 371 reg_class g3_regP(R_G3); 372 reg_class g4_regP(R_G4); 373 reg_class g5_regP(R_G5); 374 reg_class i0_regP(R_I0); 375 reg_class o0_regP(R_O0); 376 reg_class o1_regP(R_O1); 377 reg_class o2_regP(R_O2); 378 reg_class o7_regP(R_O7); 379 #endif // _LP64 380 381 382 // ---------------------------- 383 // Long Register Classes 384 // ---------------------------- 385 // Longs in 1 register. Aligned adjacent hi/lo pairs. 386 // Note: O7 is never in this class; it is sometimes used as an encoding temp. 387 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5 388 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5 389 #ifdef _LP64 390 // 64-bit, longs in 1 register: use all 64-bit integer registers 391 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's. 392 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7 393 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 394 #endif // _LP64 395 ); 396 397 reg_class g1_regL(R_G1H,R_G1); 398 reg_class g3_regL(R_G3H,R_G3); 399 reg_class o2_regL(R_O2H,R_O2); 400 reg_class o7_regL(R_O7H,R_O7); 401 402 // ---------------------------- 403 // Special Class for Condition Code Flags Register 404 reg_class int_flags(CCR); 405 reg_class float_flags(FCC0,FCC1,FCC2,FCC3); 406 reg_class float_flag0(FCC0); 407 408 409 // ---------------------------- 410 // Float Point Register Classes 411 // ---------------------------- 412 // Skip F30/F31, they are reserved for mem-mem copies 413 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 414 415 // Paired floating point registers--they show up in the same order as the floats, 416 // but they are used with the "Op_RegD" 
type, and always occur in even/odd pairs. 417 reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 418 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29, 419 /* Use extra V9 double registers; this AD file does not support V8 */ 420 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 421 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x 422 ); 423 424 // Paired floating point registers--they show up in the same order as the floats, 425 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs. 426 // This class is usable for mis-aligned loads as happen in I2C adapters. 427 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 428 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 429 %} 430 431 //----------DEFINITION BLOCK--------------------------------------------------- 432 // Define name --> value mappings to inform the ADLC of an integer valued name 433 // Current support includes integer values in the range [0, 0x7FFFFFFF] 434 // Format: 435 // int_def <name> ( <int_value>, <expression>); 436 // Generated Code in ad_<arch>.hpp 437 // #define <name> (<expression>) 438 // // value == <int_value> 439 // Generated code in ad_<arch>.cpp adlc_verification() 440 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>"); 441 // 442 definitions %{ 443 // The default cost (of an ALU instruction). 444 int_def DEFAULT_COST ( 100, 100); 445 int_def HUGE_COST (1000000, 1000000); 446 447 // Memory refs are twice as expensive as run-of-the-mill. 448 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2); 449 450 // Branches are even more expensive. 
  // Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  int_def CALL_COST         (    300, DEFAULT_COST * 3);
%}


//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
// Header information of the source block.
// Method declarations/definitions which are used outside
// the ad-scope can conveniently be defined here.
//
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.

// Must be visible to the DFA in dfa_sparc.cpp
extern bool can_branch_register( Node *bol, Node *cmp );

extern bool use_block_zeroing(Node* count);

// Macros to extract hi & lo halves from a long pair.
// G0 is not part of any long pair, so assert on that.
// Prevents accidentally using G1 instead of G0.
// NOTE(review): despite the comment above, both macros are currently
// plain identities -- no assertion is performed here; verify whether the
// G0 check was intentionally removed.
#define LONG_HI_REG(x) (x)
#define LONG_LO_REG(x) (x)

// Platform hooks queried by shared code in Compile::Shorten_branches.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::Shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // SPARC emits no trampolines, so the stub size is zero.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};

// Sizes and emitters for the exception and deoptimization handler blobs.
// The emit_* methods are defined in the source %{ %} section.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Worst-case size reserved for the exception handler.
  static uint size_exception_handler() {
    if (TraceJumps) {
      return (400); // just a guess
    }
    return ( NativeJump::instruction_size ); // sethi;jmp;nop
  }

  // Worst-case size reserved for the deopt handler; one extra
  // instruction word beyond a plain jump (see comment on the return).
  static uint size_deopt_handler() {
    if (TraceJumps) {
      return (400); // just a guess
    }
    return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
  }
};

%}

source %{
#define __ _masm.

// tertiary op of a LoadP or StoreP encoding
#define REGP_OP true

// Map ADLC register encodings back to assembler register objects;
// definitions appear later in this source block.
static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding);
static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding);
static Register reg_to_register_object(int register_encoding);

// Used by the DFA in dfa_sparc.cpp.
// Check for being able to use a V9 branch-on-register.  Requires a
// compare-vs-zero, equal/not-equal, of a value which was zero- or sign-
// extended.  Doesn't work following an integer ADD, for example, because of
// overflow (-1 incremented yields 0 plus a carry in the high-order word).  On
// 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and
// replace them with zero, which could become sign-extension in a different OS
// release.  There's no obvious reason why an interrupt will ever fill these
// bits with non-zero junk (the registers are reloaded with standard LD
// instructions which either zero-fill or sign-fill).
540 bool can_branch_register( Node *bol, Node *cmp ) { 541 if( !BranchOnRegister ) return false; 542 #ifdef _LP64 543 if( cmp->Opcode() == Op_CmpP ) 544 return true; // No problems with pointer compares 545 #endif 546 if( cmp->Opcode() == Op_CmpL ) 547 return true; // No problems with long compares 548 549 if( !SparcV9RegsHiBitsZero ) return false; 550 if( bol->as_Bool()->_test._test != BoolTest::ne && 551 bol->as_Bool()->_test._test != BoolTest::eq ) 552 return false; 553 554 // Check for comparing against a 'safe' value. Any operation which 555 // clears out the high word is safe. Thus, loads and certain shifts 556 // are safe, as are non-negative constants. Any operation which 557 // preserves zero bits in the high word is safe as long as each of its 558 // inputs are safe. Thus, phis and bitwise booleans are safe if their 559 // inputs are safe. At present, the only important case to recognize 560 // seems to be loads. Constants should fold away, and shifts & 561 // logicals can use the 'cc' forms. 562 Node *x = cmp->in(1); 563 if( x->is_Load() ) return true; 564 if( x->is_Phi() ) { 565 for( uint i = 1; i < x->req(); i++ ) 566 if( !x->in(i)->is_Load() ) 567 return false; 568 return true; 569 } 570 return false; 571 } 572 573 bool use_block_zeroing(Node* count) { 574 // Use BIS for zeroing if count is not constant 575 // or it is >= BlockZeroingLowLimit. 576 return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit); 577 } 578 579 // **************************************************************************** 580 581 // REQUIRED FUNCTIONALITY 582 583 // !!!!! Special hack to get all type of calls to specify the byte offset 584 // from the start of the call to the point where the return address 585 // will point. 586 // The "return address" is the address of the call instruction, plus 8. 

// Byte offset from the start of this call's emitted code to the point the
// return address will reference: the call instruction plus its delay slot.
// Method-handle invokes emit one extra instruction to restore SP after
// the call, so the offset grows by one instruction word.
int MachCallStaticJavaNode::ret_addr_offset() {
  int offset = NativeCall::instruction_size;  // call; delay slot
  if (_method_handle_invoke)
    offset += 4;  // restore SP
  return offset;
}

// Byte offset of the return point for a dynamic (inline-cache or vtable)
// Java call.  The size depends on which dispatch sequence is emitted:
//   - IC dispatch:     set IC oop (sethi; setlo) + call + delay slot
//   - vtable dispatch: klass load (+decode if compressed), vtable-entry
//     loads, then call + delay slot; a vtable offset too large for a
//     simm13 immediate needs two extra set instructions.
// This arithmetic must match the corresponding call encodings exactly.
int MachCallDynamicJavaNode::ret_addr_offset() {
  int vtable_index = this->_vtable_index;
  if (vtable_index < 0) {
    // must be invalid_vtable_index, not nonvirtual_vtable_index
    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
    return (NativeMovConstReg::instruction_size +
           NativeCall::instruction_size);  // sethi; setlo; call; delay slot
  } else {
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    // Byte offset of the Method* slot within the receiver's vtable.
    int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
    int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
    // Size of the load of the receiver's klass, including narrow-klass
    // decode when compressed class pointers are in use.
    int klass_load_size;
    if (UseCompressedClassPointers) {
      assert(Universe::heap() != NULL, "java heap should be initialized");
      klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    } else {
      klass_load_size = 1*BytesPerInstWord;
    }
    if (Assembler::is_simm13(v_off)) {
      // Offset fits in the immediate field of the loads.
      return klass_load_size +
             (2*BytesPerInstWord +           // ld_ptr, ld_ptr
              NativeCall::instruction_size); // call; delay slot
    } else {
      // Offset must first be materialized with set_hi/set.
      return klass_load_size +
             (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
              NativeCall::instruction_size); // call; delay slot
    }
  }
}

// Byte offset of the return point for a runtime call.  On 64-bit, targets
// beyond direct call reach use the larger far-call sequence.
int MachCallRuntimeNode::ret_addr_offset() {
#ifdef _LP64
  if (MacroAssembler::is_far_target(entry_point())) {
    return NativeFarCall::instruction_size;
  } else {
    return NativeCall::instruction_size;
  }
#else
  return NativeCall::instruction_size;  // call; delay slot
#endif
}

// Indicate if the safepoint node needs the polling page as an input.
// Since Sparc does not have absolute addressing, it does.
// SafePoints on SPARC take the polling page address as an explicit input.
bool SafePointNode::needs_polling_address_input() {
  return true;
}

// emit an interrupt that is caught by the debugger (for debugging compiler)
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ breakpoint_trap();
}

#ifndef PRODUCT
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
#endif

void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

// Traceable jump
void emit_jmpl(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->nop();
}

// Traceable jump and set exception pc
void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  // Record the issuing pc in the delay slot so the exception handler can find it.
  __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc );
}

void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}

void emit_illtrap(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ illtrap(0);
}


// Compute the constant offset of this memory operand via the matcher's
// get_base_and_disp() view of the node.  Used only by the +VerifyOops
// cross-check in emit_form3_mem_reg (ASSERT builds).
intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  const TypePtr *adr_type = TYPE_PTR_SENTINAL;  // Check for base==RegI, disp==immP
  const Node* addr = n->get_base_and_disp(offset, adr_type);
  assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP");
  assert(addr != NULL && addr != (Node*)-1, "invalid addr");
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  atype = atype->add_offset(offset);
  assert(disp32 == offset, "wrong disp32");
  return atype->_offset;
}


// Second, independent computation of the same offset by walking the address
// input (in(2)) directly.  The caller asserts both computations agree.
intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  Node* addr = n->in(2);
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) {
    Node* a = addr->in(2/*AddPNode::Address*/);
    Node* o = addr->in(3/*AddPNode::Offset*/);
    offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot;
    atype = a->bottom_type()->is_ptr()->add_offset(offset);
    assert(atype->isa_oop_ptr(), "still an oop");
  }
  offset = atype->is_ptr()->_offset;
  if (offset != Type::OffsetBot) offset += disp32;
  return offset;
}

// Build a 64-bit vector constant by replicating the low "width" bytes of
// "con", "count" times (count*width must equal 8 bytes).
static inline jdouble replicate_immI(int con, int count, int width) {
  // Load a constant replicated "count" times with width "width"
  assert(count*width == 8 && width <= 4, "sanity");
  int bit_width = width * 8;
  jlong val = con;
  val &= (((jlong) 1) << bit_width) - 1;  // mask off sign bits
  for (int i = 0; i < count - 1; i++) {
    val |= (val << bit_width);
  }
  // NOTE(review): type-punning via pointer cast — technically violates strict
  // aliasing; a memcpy-based coercion would be safer.  Kept as-is to preserve
  // behavior under the build's current compiler settings.
  jdouble dval = *((jdouble*) &val);  // coerce to double type
  return dval;
}

// Build a 64-bit vector constant holding two copies of the float "con".
static inline jdouble replicate_immF(float con) {
  // Replicate float con 2 times and pack into vector.
  // NOTE(review): pointer-cast type punning here as well — see replicate_immI.
  int val = *((int*)&con);
  jlong lval = val;
  lval = (lval << 32) | (lval & 0xFFFFFFFFl);
  jdouble dval = *((jdouble*) &lval);  // coerce to double type
  return dval;
}

// Standard Sparc opcode form2 field breakdown
static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) {
  f0 &= (1<<19)-1;     // Mask displacement to 19 bits
  int op = (f30 << 30) |
           (f29 << 29) |
           (f25 << 25) |
           (f22 << 22) |
           (f20 << 20) |
           (f19 << 19) |
           (f0  <<  0);
  cbuf.insts()->emit_int32(op);
}

// Standard Sparc opcode form2 field breakdown
static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) {
  f0 >>= 10;           // Drop 10 bits
  f0 &= (1<<22)-1;     // Mask displacement to 22 bits
  int op = (f30 << 30) |
           (f25 << 25) |
           (f22 << 22) |
           (f0  <<  0);
  cbuf.insts()->emit_int32(op);
}

// Standard Sparc opcode form3 field breakdown
static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) {
  int op = (f30 << 30) |
           (f25 << 25) |
           (f19 << 19) |
           (f14 << 14) |
           (f5  <<  5) |
           (f0  <<  0);
  cbuf.insts()->emit_int32(op);
}

// Standard Sparc opcode form3 field breakdown, immediate (i=1) variant.
static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) {
  simm13 &= (1<<13)-1; // Mask to 13 bits
  int op = (f30 << 30) |
           (f25 << 25) |
           (f19 << 19) |
           (f14 << 14) |
           (1   << 13) | // bit to indicate immediate-mode
           (simm13<<0);
  cbuf.insts()->emit_int32(op);
}

static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) {
  simm10 &= (1<<10)-1; // Mask to 10 bits
  emit3_simm13(cbuf,f30,f25,f19,f14,simm10);
}

#ifdef ASSERT
// Helper function for VerifyOops in emit_form3_mem_reg
void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) {
  warning("VerifyOops encountered unexpected instruction:");
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
#endif


// Emit one SPARC form3 memory instruction (load/store/prefetch), choosing the
// reg-reg or reg-imm addressing form based on disp32.  In ASSERT builds this
// also implements the +VerifyOops machinery described below.
void emit_form3_mem_reg(CodeBuffer &cbuf, PhaseRegAlloc* ra, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation.  +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation.  It can also serve as a cross-check
  // on the validity of oop maps, when used together with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not.  (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base = false;
  bool is_verified_oop_load = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::ldub_op3: ld_op = Op_LoadUB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      if (st_op == Op_StoreI) st_op = Op_StoreP;
      else if (ld_op == Op_LoadI) ld_op = Op_LoadP;
      else ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI     && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadVector && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op)    && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI  && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF  && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL  && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr();  // %%% oopptr?
        if (atype != NULL) {
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            // re-run both for easy debugger stepping when the assert fires
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                         || offset == oopDesc::mark_offset_in_bytes())) {
            // loading the mark should not be allowed either, but
            // we don't check this since it conflicts with InlineObjectHash
            // usage of LoadINode to get the mark. We could keep the
            // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  uint instr;
  instr = (Assembler::ldst_op << 30)
        | (dst_enc << 25)
        | (primary << 19)
        | (src1_enc << 14);

  uint index = src2_enc;
  int disp = disp32;

  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc) {
    disp += STACK_BIAS;
    // Quick fix for JDK-8029668: check that stack offset fits, bailout if not
    if (!Assembler::is_simm13(disp)) {
      ra->C->record_method_not_compilable("unable to handle large constant offsets");
      return;
    }
  }

  // We should have a compiler bailout here rather than a guarantee.
  // Better yet would be some mechanism to handle variable-size matches correctly.
  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000; // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  {
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      // copy the O7-preserved load result into its real destination
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}

// Emit a relocated call to entry_point; optionally preserve G2 across the
// call via L7 (using the delay slot for the save).
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.

#ifdef ASSERT
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rtype);

  if (preserve_g2) __ delayed()->mov(G2, L7);
  else __ delayed()->nop();

  if (preserve_g2) __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
    // this is also a native call, so smash the first 7 stack locations,
    // and the various registers

    // Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
    // while [SP+0x44..0x58] are the argument dump slots.
    __ set((intptr_t)0xbaadf00d, G1);
    __ mov(G1, G5);
    __ sllx(G1, 32, G1);
    __ or3(G1, G5, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, 0x40);
    __ stx(G1, SP, 0x48);
    __ stx(G1, SP, 0x50);
    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
  }
#endif /*ASSERT*/
}

//=============================================================================
// REQUIRED FUNCTIONALITY for encoding
void emit_lo(CodeBuffer &cbuf, int val) { }
void emit_hi(CodeBuffer &cbuf, int val) { }


//=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();

int Compile::ConstantTable::calculate_table_base_offset() const {
  if (UseRDPCForConstantTableBase) {
    // The table base offset might be less but then it fits into
    // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
    return Assembler::min_simm13();
  } else {
    // Bias the base into the middle of the table so both positive and
    // negative simm13 displacements can reach entries.
    int offset = -(size() / 2);
    if (!Assembler::is_simm13(offset)) {
      offset = Assembler::min_simm13();
    }
    return offset;
  }
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

// Materialize the constant-table base register, either PC-relative (RDPC)
// or via an absolute relocated SET.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be: %d == %d", constant_table.size(), consts_size);

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section. This
    // assert checks for that. The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int insts_offset = __ offset();

    // Layout:
    //
    // |----------- consts section ------------|----------- insts section -----------...
    // |------ constant table -----|- padding -|------------------x----
    //                                                            \ current PC (RDPC instruction)
    // |<------------- consts_size ----------->|<- insts_offset ->|
    //                                         \ table base
    // The table base offset is later added to the load displacement
    // so it has to be negative.
    int table_base_offset = -(consts_size + insts_offset);
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (Assembler::is_simm13(table_base_offset)) {
      constant_table.set_table_base_offset(table_base_offset);
      disp = 0;
    } else {
      // Otherwise we set the constant table base offset to the
      // maximum negative displacement of load instructions to keep
      // the disp as small as possible:
      //
      // |<------------- consts_size ----------->|<- insts_offset ->|
      // |<--------- min_simm13 --------->|<-------- disp --------->|
      //                                  \ table base
      table_base_offset = Assembler::min_simm13();
      constant_table.set_table_base_offset(table_base_offset);
      disp = (consts_size + insts_offset) + table_base_offset;
    }

    __ rdpc(r);

    if (disp != 0) {
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}

uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
  if (UseRDPCForConstantTableBase) {
    // This is really the worst case but generally it's only 1 instruction.
    return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
  } else {
    return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
  }
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  if (UseRDPCForConstantTableBase) {
    st->print("RDPC %s\t! constant table base", reg);
  } else {
    st->print("SET &constanttable,%s\t! constant table base", reg);
  }
}
#endif


//=============================================================================

#ifndef PRODUCT
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  if( VerifyThread ) {
    st->print_cr("Verify_Thread"); st->print("\t");
  }

  size_t framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t");
  }

  if (Assembler::is_simm13(-framesize)) {
    st->print ("SAVE R_SP,-" SIZE_FORMAT ",R_SP",framesize);
  } else {
    st->print_cr("SETHI R_SP,hi%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t");
    st->print_cr("ADD R_G3,lo%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t");
    st->print ("SAVE R_SP,R_G3,R_SP");
  }

}
#endif

void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  __ verify_thread();

  size_t framesize = C->frame_size_in_bytes();
  assert(framesize >= 16*wordSize, "must have room for reg. save area");
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
  int bangsize = C->bang_size_in_bytes();

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  if (Assembler::is_simm13(-framesize)) {
    __ save(SP, -framesize, SP);
  } else {
    // Frame too large for a single SAVE immediate: build -framesize in G3.
    __ sethi(-framesize & ~0x3ff, G3);
    __ add(G3, -framesize & 0x3ff, G3);
    __ save(SP, G3, SP);
  }
  C->set_frame_complete( __ offset() );

  if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}

uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

int MachPrologNode::reloc() const {
  return 10; // a large enough number
}

//=============================================================================
#ifndef PRODUCT
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  if(do_polling() && ra_->C->is_method_compilation()) {
    st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
    st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
#else
    st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
#endif
  }

  if(do_polling()) {
    if (UseCBCond && !ra_->C->is_method_compilation()) {
      st->print("NOP\n\t");
    }
    st->print("RET\n\t");
  }

  st->print("RESTORE");
}
#endif

void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  __ verify_thread();

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  // If this does safepoint polling, then do it here
  if(do_polling() && ra_->C->is_method_compilation()) {
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, L0);
    __ relocate(relocInfo::poll_return_type);
    __ ld_ptr(L0, 0, G0);
  }

  // If this is a return, then stuff the restore in the delay slot
  if(do_polling()) {
    if (UseCBCond && !ra_->C->is_method_compilation()) {
      // Insert extra padding for the case when the epilogue is preceded by
      // a cbcond jump, which can't be followed by a CTI instruction
      __ nop();
    }
    __ ret();
    __ delayed()->restore();
  } else {
    __ restore();
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}

//=============================================================================

// Figure out which register class each belongs in: rc_int, rc_float, rc_stack
enum RC { rc_bad, rc_int, rc_float, rc_stack };
static enum RC rc_class( OptoReg::Name reg ) {
  if( !OptoReg::is_valid(reg) ) return rc_bad;
  if (OptoReg::is_stack(reg)) return rc_stack;
  VMReg r = OptoReg::as_VMReg(reg);
  if (r->is_Register()) return rc_int;
  assert(r->is_FloatRegister(), "must be");
  return rc_float;
}

// Emit (or, in the format/no-size path, print) one spill load/store between
// a register and an [SP+offset] stack slot.  Returns the accumulated size.
static int impl_helper(const MachNode* mach, CodeBuffer* cbuf, PhaseRegAlloc* ra, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
  if (cbuf) {
    emit_form3_mem_reg(*cbuf, ra, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
  }
#ifndef PRODUCT
  else if (!do_size) {
    if (size != 0) st->print("\n\t");
    if (is_load) st->print("%s [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
    else st->print("%s R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
  }
#endif
  return size+4;
}

// Emit (or print) one reg-to-reg move instruction; op1/op2 are the form3
// opcode fields.  Returns the accumulated size.
static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] );
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    st->print("%s R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst));
  }
#endif
  return size+4;
}

uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
                                        PhaseRegAlloc *ra_,
                                        bool do_size,
                                        outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return size; // Self copy, no move

  // --------------------------------------
  // Check for mem-mem move. Load into unused float registers and fall into
  // the float-store case.
  if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
    int offset = ra_->reg2offset(src_first);
    // Further check for aligned-adjacent pair, so we can use a double load
    if( (src_first&1)==0 && src_first+1 == src_second ) {
      src_second = OptoReg::Name(R_F31_num);
      src_second_rc = rc_float;
      size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st);
    } else {
      size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st);
    }
    src_first = OptoReg::Name(R_F30_num);
    src_first_rc = rc_float;
  }

  if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) {
    int offset = ra_->reg2offset(src_second);
    size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st);
    src_second = OptoReg::Name(R_F31_num);
    src_second_rc = rc_float;
  }

  // --------------------------------------
  // Check for float->int copy; requires a trip through memory
  if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
    int offset = frame::register_save_words*wordSize;
    if (cbuf) {
      // SUB SP / STF / LDUW / ADD SP: bounce the value through a scratch slot
      emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 );
      impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
      impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
      emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 );
    }
#ifndef PRODUCT
    else if (!do_size) {
      if (size != 0) st->print("\n\t");
      st->print( "SUB R_SP,16,R_SP\n");
      impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
      impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
      st->print("\tADD R_SP,16,R_SP\n");
    }
#endif
    size += 16;
  }

  // Check for float->int copy on T4
  if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
    // Further check for aligned-adjacent pair, so we can use a double move
    if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
      return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st);
    size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st);
  }
  // Check for int->float copy on T4
  if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
    // Further check for aligned-adjacent pair, so we can use a double move
    if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
      return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st);
    size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st);
  }

  // --------------------------------------
  // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
  // In such cases, I have to do the big-endian swap. For aligned targets, the
  // hardware does the flop for me. Doubles are always aligned, so no problem
  // there. Misaligned sources only come from native-long-returns (handled
  // special below).
#ifndef _LP64
  if( src_first_rc == rc_int && // source is already big-endian
      src_second_rc != rc_bad && // 64-bit move
      ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst
    assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" );
    // Do the big-endian flop.
    OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
    enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
  }
#endif

  // --------------------------------------
  // Check for integer reg-reg copy
  if( src_first_rc == rc_int && dst_first_rc == rc_int ) {
#ifndef _LP64
    if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case
      // Note: The _first and _second suffixes refer to the addresses of the 2 halves of the 64-bit value
      // as stored in memory. On a big-endian machine like SPARC, this means that the _second
      // operand contains the least significant word of the 64-bit value and vice versa.
      OptoReg::Name tmp = OptoReg::Name(R_O7_num);
      assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
      // Shift O0 left in-place, zero-extend O1, then OR them into the dst
      if( cbuf ) {
        emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 );
        emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 );
        emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] );
#ifndef PRODUCT
      } else if( !do_size ) {
        if( size != 0 ) st->print("\n\t");
        st->print("SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
        st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
        st->print("OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
#endif
      }
      return size+12;
    }
    else if( dst_first == R_I0_num && dst_second == R_I1_num ) {
      // returning a long value in I0/I1
      // a SpillCopy must be able to target a return instruction's reg_class
      // Note: The _first and _second suffixes refer to the addresses of the 2 halves of the 64-bit value
      // as stored in memory. On a big-endian machine like SPARC, this means that the _second
      // operand contains the least significant word of the 64-bit value and vice versa.
      OptoReg::Name tdest = dst_first;

      if (src_first == dst_first) {
        // Destination overlaps source: stage the high word in O7 first.
        tdest = OptoReg::Name(R_O7_num);
        size += 4;
      }

      if( cbuf ) {
        assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
        // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
        // ShrL_reg_imm6
        emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 );
        // ShrR_reg_imm6 src, 0, dst
        emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 );
        if (tdest != dst_first) {
          emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] );
        }
      }
#ifndef PRODUCT
      else if( !do_size ) {
        if( size != 0 ) st->print("\n\t"); // %%%%% !!!!!
        st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
        st->print("SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
        if (tdest != dst_first) {
          st->print("MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
        }
      }
#endif // PRODUCT
      return size+8;
    }
#endif // !_LP64
    // Else normal reg-reg copy
    assert( src_second != dst_first, "smashed second before evacuating it" );
    size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st);
    assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" );
    // This moves an aligned adjacent pair.
    // See if we are done.
    if( src_first+1 == src_second && dst_first+1 == dst_second )
      return size;
  }

  // Check for integer store
  if( src_first_rc == rc_int && dst_first_rc == rc_stack ) {
    int offset = ra_->reg2offset(dst_first);
    // Further check for aligned-adjacent pair, so we can use a double store
    if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
      return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st);
    size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st);
  }

  // Check for integer load
  if( dst_first_rc == rc_int && src_first_rc == rc_stack ) {
    int offset = ra_->reg2offset(src_first);
    // Further check for aligned-adjacent pair, so we can use a double load
    if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
      return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st);
    size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
  }

  // Check for float reg-reg copy
  if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
    // Further check for aligned-adjacent pair, so we can use a double move
    if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 &&
spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first)); 1535 } 1536 } 1537 #endif // PRODUCT 1538 return size+8; 1539 } 1540 #endif // !_LP64 1541 // Else normal reg-reg copy 1542 assert( src_second != dst_first, "smashed second before evacuating it" ); 1543 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st); 1544 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" ); 1545 // This moves an aligned adjacent pair. 1546 // See if we are done. 1547 if( src_first+1 == src_second && dst_first+1 == dst_second ) 1548 return size; 1549 } 1550 1551 // Check for integer store 1552 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) { 1553 int offset = ra_->reg2offset(dst_first); 1554 // Further check for aligned-adjacent pair, so we can use a double store 1555 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1556 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st); 1557 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st); 1558 } 1559 1560 // Check for integer load 1561 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) { 1562 int offset = ra_->reg2offset(src_first); 1563 // Further check for aligned-adjacent pair, so we can use a double load 1564 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1565 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st); 1566 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1567 } 1568 1569 // Check for float reg-reg copy 1570 if( src_first_rc == rc_float && dst_first_rc == rc_float ) { 1571 // Further check for aligned-adjacent pair, so we can use a double move 1572 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && 
dst_first+1 == dst_second ) 1573 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st); 1574 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st); 1575 } 1576 1577 // Check for float store 1578 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) { 1579 int offset = ra_->reg2offset(dst_first); 1580 // Further check for aligned-adjacent pair, so we can use a double store 1581 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1582 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st); 1583 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1584 } 1585 1586 // Check for float load 1587 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) { 1588 int offset = ra_->reg2offset(src_first); 1589 // Further check for aligned-adjacent pair, so we can use a double load 1590 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1591 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st); 1592 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st); 1593 } 1594 1595 // -------------------------------------------------------------------- 1596 // Check for hi bits still needing moving. Only happens for misaligned 1597 // arguments to native calls. 1598 if( src_second == dst_second ) 1599 return size; // Self copy; no move 1600 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" ); 1601 1602 #ifndef _LP64 1603 // In the LP64 build, all registers can be moved as aligned/adjacent 1604 // pairs, so there's never any need to move the high bits separately. 
1605 // The 32-bit builds have to deal with the 32-bit ABI which can force 1606 // all sorts of silly alignment problems. 1607 1608 // Check for integer reg-reg copy. Hi bits are stuck up in the top 1609 // 32-bits of a 64-bit register, but are needed in low bits of another 1610 // register (else it's a hi-bits-to-hi-bits copy which should have 1611 // happened already as part of a 64-bit move) 1612 if( src_second_rc == rc_int && dst_second_rc == rc_int ) { 1613 assert( (src_second&1)==1, "its the evil O0/O1 native return case" ); 1614 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" ); 1615 // Shift src_second down to dst_second's low bits. 1616 if( cbuf ) { 1617 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1618 #ifndef PRODUCT 1619 } else if( !do_size ) { 1620 if( size != 0 ) st->print("\n\t"); 1621 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second)); 1622 #endif 1623 } 1624 return size+4; 1625 } 1626 1627 // Check for high word integer store. Must down-shift the hi bits 1628 // into a temp register, then fall into the case of storing int bits. 1629 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) { 1630 // Shift src_second down to dst_second's low bits. 1631 if( cbuf ) { 1632 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1633 #ifndef PRODUCT 1634 } else if( !do_size ) { 1635 if( size != 0 ) st->print("\n\t"); 1636 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num)); 1637 #endif 1638 } 1639 size+=4; 1640 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num! 
1641 } 1642 1643 // Check for high word integer load 1644 if( dst_second_rc == rc_int && src_second_rc == rc_stack ) 1645 return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st); 1646 1647 // Check for high word integer store 1648 if( src_second_rc == rc_int && dst_second_rc == rc_stack ) 1649 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st); 1650 1651 // Check for high word float store 1652 if( src_second_rc == rc_float && dst_second_rc == rc_stack ) 1653 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st); 1654 1655 #endif // !_LP64 1656 1657 Unimplemented(); 1658 } 1659 1660 #ifndef PRODUCT 1661 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1662 implementation( NULL, ra_, false, st ); 1663 } 1664 #endif 1665 1666 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1667 implementation( &cbuf, ra_, false, NULL ); 1668 } 1669 1670 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const { 1671 return implementation( NULL, ra_, true, NULL ); 1672 } 1673 1674 //============================================================================= 1675 #ifndef PRODUCT 1676 void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const { 1677 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count); 1678 } 1679 #endif 1680 1681 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const { 1682 MacroAssembler _masm(&cbuf); 1683 for(int i = 0; i < _count; i += 1) { 1684 __ nop(); 1685 } 1686 } 1687 1688 uint MachNopNode::size(PhaseRegAlloc *ra_) const { 1689 return 4 * _count; 1690 } 1691 1692 1693 //============================================================================= 1694 #ifndef PRODUCT 1695 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1696 int offset = 
ra_->reg2offset(in_RegMask(0).find_first_elem()); 1697 int reg = ra_->get_reg_first(this); 1698 st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]); 1699 } 1700 #endif 1701 1702 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1703 MacroAssembler _masm(&cbuf); 1704 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS; 1705 int reg = ra_->get_encode(this); 1706 1707 if (Assembler::is_simm13(offset)) { 1708 __ add(SP, offset, reg_to_register_object(reg)); 1709 } else { 1710 __ set(offset, O7); 1711 __ add(SP, O7, reg_to_register_object(reg)); 1712 } 1713 } 1714 1715 uint BoxLockNode::size(PhaseRegAlloc *ra_) const { 1716 // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_) 1717 assert(ra_ == ra_->C->regalloc(), "sanity"); 1718 return ra_->C->scratch_emit_size(this); 1719 } 1720 1721 //============================================================================= 1722 #ifndef PRODUCT 1723 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1724 st->print_cr("\nUEP:"); 1725 #ifdef _LP64 1726 if (UseCompressedClassPointers) { 1727 assert(Universe::heap() != NULL, "java heap should be initialized"); 1728 st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass"); 1729 if (Universe::narrow_klass_base() != 0) { 1730 st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base"); 1731 if (Universe::narrow_klass_shift() != 0) { 1732 st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5"); 1733 } 1734 st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5"); 1735 st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base"); 1736 } else { 1737 st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5"); 1738 } 1739 } else { 1740 st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! 
Inline cache check"); 1741 } 1742 st->print_cr("\tCMP R_G5,R_G3" ); 1743 st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2"); 1744 #else // _LP64 1745 st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check"); 1746 st->print_cr("\tCMP R_G5,R_G3" ); 1747 st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2"); 1748 #endif // _LP64 1749 } 1750 #endif 1751 1752 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1753 MacroAssembler _masm(&cbuf); 1754 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode()); 1755 Register temp_reg = G3; 1756 assert( G5_ic_reg != temp_reg, "conflicting registers" ); 1757 1758 // Load klass from receiver 1759 __ load_klass(O0, temp_reg); 1760 // Compare against expected klass 1761 __ cmp(temp_reg, G5_ic_reg); 1762 // Branch to miss code, checks xcc or icc depending 1763 __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2); 1764 } 1765 1766 uint MachUEPNode::size(PhaseRegAlloc *ra_) const { 1767 return MachNode::size(ra_); 1768 } 1769 1770 1771 //============================================================================= 1772 1773 1774 // Emit exception handler code. 
1775 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) { 1776 Register temp_reg = G3; 1777 AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point()); 1778 MacroAssembler _masm(&cbuf); 1779 1780 address base = __ start_a_stub(size_exception_handler()); 1781 if (base == NULL) { 1782 ciEnv::current()->record_failure("CodeCache is full"); 1783 return 0; // CodeBuffer::expand failed 1784 } 1785 1786 int offset = __ offset(); 1787 1788 __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp 1789 __ delayed()->nop(); 1790 1791 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); 1792 1793 __ end_a_stub(); 1794 1795 return offset; 1796 } 1797 1798 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) { 1799 // Can't use any of the current frame's registers as we may have deopted 1800 // at a poll and everything (including G3) can be live. 1801 Register temp_reg = L0; 1802 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); 1803 MacroAssembler _masm(&cbuf); 1804 1805 address base = __ start_a_stub(size_deopt_handler()); 1806 if (base == NULL) { 1807 ciEnv::current()->record_failure("CodeCache is full"); 1808 return 0; // CodeBuffer::expand failed 1809 } 1810 1811 int offset = __ offset(); 1812 __ save_frame(0); 1813 __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp 1814 __ delayed()->restore(); 1815 1816 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); 1817 1818 __ end_a_stub(); 1819 return offset; 1820 1821 } 1822 1823 // Given a register encoding, produce a Integer Register object 1824 static Register reg_to_register_object(int register_encoding) { 1825 assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding"); 1826 return as_Register(register_encoding); 1827 } 1828 1829 // Given a register encoding, produce a single-precision Float Register object 1830 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) { 1831 
assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding"); 1832 return as_SingleFloatRegister(register_encoding); 1833 } 1834 1835 // Given a register encoding, produce a double-precision Float Register object 1836 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) { 1837 assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding"); 1838 assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding"); 1839 return as_DoubleFloatRegister(register_encoding); 1840 } 1841 1842 const bool Matcher::match_rule_supported(int opcode) { 1843 if (!has_match_rule(opcode)) 1844 return false; 1845 1846 switch (opcode) { 1847 case Op_CountLeadingZerosI: 1848 case Op_CountLeadingZerosL: 1849 case Op_CountTrailingZerosI: 1850 case Op_CountTrailingZerosL: 1851 case Op_PopCountI: 1852 case Op_PopCountL: 1853 if (!UsePopCountInstruction) 1854 return false; 1855 case Op_CompareAndSwapL: 1856 #ifdef _LP64 1857 case Op_CompareAndSwapP: 1858 #endif 1859 if (!VM_Version::supports_cx8()) 1860 return false; 1861 break; 1862 } 1863 1864 return true; // Per default match rules are supported. 
1865 } 1866 1867 const int Matcher::float_pressure(int default_pressure_threshold) { 1868 return default_pressure_threshold; 1869 } 1870 1871 int Matcher::regnum_to_fpu_offset(int regnum) { 1872 return regnum - 32; // The FP registers are in the second chunk 1873 } 1874 1875 #ifdef ASSERT 1876 address last_rethrow = NULL; // debugging aid for Rethrow encoding 1877 #endif 1878 1879 // Vector width in bytes 1880 const int Matcher::vector_width_in_bytes(BasicType bt) { 1881 assert(MaxVectorSize == 8, ""); 1882 return 8; 1883 } 1884 1885 // Vector ideal reg 1886 const int Matcher::vector_ideal_reg(int size) { 1887 assert(MaxVectorSize == 8, ""); 1888 return Op_RegD; 1889 } 1890 1891 const int Matcher::vector_shift_count_ideal_reg(int size) { 1892 fatal("vector shift is not supported"); 1893 return Node::NotAMachineReg; 1894 } 1895 1896 // Limits on vector size (number of elements) loaded into vector. 1897 const int Matcher::max_vector_size(const BasicType bt) { 1898 assert(is_java_primitive(bt), "only primitive type vectors"); 1899 return vector_width_in_bytes(bt)/type2aelembytes(bt); 1900 } 1901 1902 const int Matcher::min_vector_size(const BasicType bt) { 1903 return max_vector_size(bt); // Same as max. 1904 } 1905 1906 // SPARC doesn't support misaligned vectors store/load. 1907 const bool Matcher::misaligned_vectors_ok() { 1908 return false; 1909 } 1910 1911 // Current (2013) SPARC platforms need to read original key 1912 // to construct decryption expanded key 1913 const bool Matcher::pass_original_key_for_aes() { 1914 return true; 1915 } 1916 1917 // USII supports fxtof through the whole range of number, USIII doesn't 1918 const bool Matcher::convL2FSupported(void) { 1919 return VM_Version::has_fast_fxtof(); 1920 } 1921 1922 // Is this branch offset short enough that a short branch can be used? 1923 // 1924 // NOTE: If the platform does not provide any short branch variants, then 1925 // this method should return false for offset 0. 
1926 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { 1927 // The passed offset is relative to address of the branch. 1928 // Don't need to adjust the offset. 1929 return UseCBCond && Assembler::is_simm12(offset); 1930 } 1931 1932 const bool Matcher::isSimpleConstant64(jlong value) { 1933 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?. 1934 // Depends on optimizations in MacroAssembler::setx. 1935 int hi = (int)(value >> 32); 1936 int lo = (int)(value & ~0); 1937 return (hi == 0) || (hi == -1) || (lo == 0); 1938 } 1939 1940 // No scaling for the parameter the ClearArray node. 1941 const bool Matcher::init_array_count_is_in_bytes = true; 1942 1943 // Threshold size for cleararray. 1944 const int Matcher::init_array_short_size = 8 * BytesPerLong; 1945 1946 // No additional cost for CMOVL. 1947 const int Matcher::long_cmove_cost() { return 0; } 1948 1949 // CMOVF/CMOVD are expensive on T4 and on SPARC64. 1950 const int Matcher::float_cmove_cost() { 1951 return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0; 1952 } 1953 1954 // Does the CPU require late expand (see block.cpp for description of late expand)? 1955 const bool Matcher::require_postalloc_expand = false; 1956 1957 // Should the Matcher clone shifts on addressing modes, expecting them to 1958 // be subsumed into complex addressing expressions or compute them into 1959 // registers? True for Intel but false for most RISCs 1960 const bool Matcher::clone_shift_expressions = false; 1961 1962 // Do we need to mask the count passed to shift instructions or does 1963 // the cpu only look at the lower 5/6 bits anyway? 
1964 const bool Matcher::need_masked_shift_count = false; 1965 1966 bool Matcher::narrow_oop_use_complex_address() { 1967 NOT_LP64(ShouldNotCallThis()); 1968 assert(UseCompressedOops, "only for compressed oops code"); 1969 return false; 1970 } 1971 1972 bool Matcher::narrow_klass_use_complex_address() { 1973 NOT_LP64(ShouldNotCallThis()); 1974 assert(UseCompressedClassPointers, "only for compressed klass code"); 1975 return false; 1976 } 1977 1978 // Is it better to copy float constants, or load them directly from memory? 1979 // Intel can load a float constant from a direct address, requiring no 1980 // extra registers. Most RISCs will have to materialize an address into a 1981 // register first, so they would do better to copy the constant from stack. 1982 const bool Matcher::rematerialize_float_constants = false; 1983 1984 // If CPU can load and store mis-aligned doubles directly then no fixup is 1985 // needed. Else we split the double into 2 integer pieces and move it 1986 // piece-by-piece. Only happens when passing doubles into C code as the 1987 // Java calling convention forces doubles to be aligned. 1988 #ifdef _LP64 1989 const bool Matcher::misaligned_doubles_ok = true; 1990 #else 1991 const bool Matcher::misaligned_doubles_ok = false; 1992 #endif 1993 1994 // No-op on SPARC. 1995 void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) { 1996 } 1997 1998 // Advertise here if the CPU requires explicit rounding operations 1999 // to implement the UseStrictFP mode. 2000 const bool Matcher::strict_fp_requires_explicit_rounding = false; 2001 2002 // Are floats converted to double when stored to stack during deoptimization? 2003 // Sparc does not handle callee-save floats. 2004 bool Matcher::float_in_double() { return false; } 2005 2006 // Do ints take an entire long register or just half? 2007 // Note that we if-def off of _LP64. 2008 // The relevant question is how the int is callee-saved. 
In _LP64 2009 // the whole long is written but de-opt'ing will have to extract 2010 // the relevant 32 bits, in not-_LP64 only the low 32 bits is written. 2011 #ifdef _LP64 2012 const bool Matcher::int_in_long = true; 2013 #else 2014 const bool Matcher::int_in_long = false; 2015 #endif 2016 2017 // Return whether or not this register is ever used as an argument. This 2018 // function is used on startup to build the trampoline stubs in generateOptoStub. 2019 // Registers not mentioned will be killed by the VM call in the trampoline, and 2020 // arguments in those registers not be available to the callee. 2021 bool Matcher::can_be_java_arg( int reg ) { 2022 // Standard sparc 6 args in registers 2023 if( reg == R_I0_num || 2024 reg == R_I1_num || 2025 reg == R_I2_num || 2026 reg == R_I3_num || 2027 reg == R_I4_num || 2028 reg == R_I5_num ) return true; 2029 #ifdef _LP64 2030 // 64-bit builds can pass 64-bit pointers and longs in 2031 // the high I registers 2032 if( reg == R_I0H_num || 2033 reg == R_I1H_num || 2034 reg == R_I2H_num || 2035 reg == R_I3H_num || 2036 reg == R_I4H_num || 2037 reg == R_I5H_num ) return true; 2038 2039 if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) { 2040 return true; 2041 } 2042 2043 #else 2044 // 32-bit builds with longs-in-one-entry pass longs in G1 & G4. 2045 // Longs cannot be passed in O regs, because O regs become I regs 2046 // after a 'save' and I regs get their high bits chopped off on 2047 // interrupt. 
2048 if( reg == R_G1H_num || reg == R_G1_num ) return true; 2049 if( reg == R_G4H_num || reg == R_G4_num ) return true; 2050 #endif 2051 // A few float args in registers 2052 if( reg >= R_F0_num && reg <= R_F7_num ) return true; 2053 2054 return false; 2055 } 2056 2057 bool Matcher::is_spillable_arg( int reg ) { 2058 return can_be_java_arg(reg); 2059 } 2060 2061 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { 2062 // Use hardware SDIVX instruction when it is 2063 // faster than a code which use multiply. 2064 return VM_Version::has_fast_idiv(); 2065 } 2066 2067 // Register for DIVI projection of divmodI 2068 RegMask Matcher::divI_proj_mask() { 2069 ShouldNotReachHere(); 2070 return RegMask(); 2071 } 2072 2073 // Register for MODI projection of divmodI 2074 RegMask Matcher::modI_proj_mask() { 2075 ShouldNotReachHere(); 2076 return RegMask(); 2077 } 2078 2079 // Register for DIVL projection of divmodL 2080 RegMask Matcher::divL_proj_mask() { 2081 ShouldNotReachHere(); 2082 return RegMask(); 2083 } 2084 2085 // Register for MODL projection of divmodL 2086 RegMask Matcher::modL_proj_mask() { 2087 ShouldNotReachHere(); 2088 return RegMask(); 2089 } 2090 2091 const RegMask Matcher::method_handle_invoke_SP_save_mask() { 2092 return L7_REGP_mask(); 2093 } 2094 2095 %} 2096 2097 2098 // The intptr_t operand types, defined by textual substitution. 2099 // (Cf. opto/type.hpp. This lets us avoid many, many other ifdefs.) 2100 #ifdef _LP64 2101 #define immX immL 2102 #define immX13 immL13 2103 #define immX13m7 immL13m7 2104 #define iRegX iRegL 2105 #define g1RegX g1RegL 2106 #else 2107 #define immX immI 2108 #define immX13 immI13 2109 #define immX13m7 immI13m7 2110 #define iRegX iRegI 2111 #define g1RegX g1RegI 2112 #endif 2113 2114 //----------ENCODING BLOCK----------------------------------------------------- 2115 // This block specifies the encoding classes used by the compiler to output 2116 // byte streams. 
Encoding classes are parameterized macros used by 2117 // Machine Instruction Nodes in order to generate the bit encoding of the 2118 // instruction. Operands specify their base encoding interface with the 2119 // interface keyword. There are currently supported four interfaces, 2120 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an 2121 // operand to generate a function which returns its register number when 2122 // queried. CONST_INTER causes an operand to generate a function which 2123 // returns the value of the constant when queried. MEMORY_INTER causes an 2124 // operand to generate four functions which return the Base Register, the 2125 // Index Register, the Scale Value, and the Offset Value of the operand when 2126 // queried. COND_INTER causes an operand to generate six functions which 2127 // return the encoding code (ie - encoding bits for the instruction) 2128 // associated with each basic boolean condition for a conditional instruction. 2129 // 2130 // Instructions specify two basic values for encoding. Again, a function 2131 // is available to check if the constant displacement is an oop. They use the 2132 // ins_encode keyword to specify their encoding classes (which must be 2133 // a sequence of enc_class names, and their parameters, specified in 2134 // the encoding block), and they use the 2135 // opcode keyword to specify, in order, their primary, secondary, and 2136 // tertiary opcode. Only the opcode sections which a particular instruction 2137 // needs for encoding need to be specified. 
2138 encode %{ 2139 enc_class enc_untested %{ 2140 #ifdef ASSERT 2141 MacroAssembler _masm(&cbuf); 2142 __ untested("encoding"); 2143 #endif 2144 %} 2145 2146 enc_class form3_mem_reg( memory mem, iRegI dst ) %{ 2147 emit_form3_mem_reg(cbuf, ra_, this, $primary, $tertiary, 2148 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2149 %} 2150 2151 enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{ 2152 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 2153 $mem$$base, $mem$$disp, $mem$$index, $dst$$reg); 2154 %} 2155 2156 enc_class form3_mem_prefetch_read( memory mem ) %{ 2157 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 2158 $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/); 2159 %} 2160 2161 enc_class form3_mem_prefetch_write( memory mem ) %{ 2162 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 2163 $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/); 2164 %} 2165 2166 enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{ 2167 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2168 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2169 guarantee($mem$$index == R_G0_enc, "double index?"); 2170 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc ); 2171 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg ); 2172 emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 ); 2173 emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc ); 2174 %} 2175 2176 enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{ 2177 assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4"); 2178 assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4"); 2179 guarantee($mem$$index == R_G0_enc, "double index?"); 2180 // Load long with 2 instructions 2181 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, 
$mem$$base, $mem$$disp, R_G0_enc, $reg$$reg+0 ); 2182 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 ); 2183 %} 2184 2185 //%%% form3_mem_plus_4_reg is a hack--get rid of it 2186 enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{ 2187 guarantee($mem$$disp, "cannot offset a reg-reg operand by 4"); 2188 emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg); 2189 %} 2190 2191 enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{ 2192 // Encode a reg-reg copy. If it is useless, then empty encoding. 2193 if( $rs2$$reg != $rd$$reg ) 2194 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg ); 2195 %} 2196 2197 // Target lo half of long 2198 enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{ 2199 // Encode a reg-reg copy. If it is useless, then empty encoding. 2200 if( $rs2$$reg != LONG_LO_REG($rd$$reg) ) 2201 emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg ); 2202 %} 2203 2204 // Source lo half of long 2205 enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{ 2206 // Encode a reg-reg copy. If it is useless, then empty encoding. 2207 if( LONG_LO_REG($rs2$$reg) != $rd$$reg ) 2208 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) ); 2209 %} 2210 2211 // Target hi half of long 2212 enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{ 2213 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 ); 2214 %} 2215 2216 // Source lo half of long, and leave it sign extended. 2217 enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{ 2218 // Sign extend low half 2219 emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 ); 2220 %} 2221 2222 // Source hi half of long, and leave it sign extended. 
// Copy the hi half of a 64-bit long into an int register by shifting
// it right 32 bits (srlx), leaving it in the low half of rd.
enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{
  // Shift high half to low half
  emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 );
%}

// Source hi half of long
enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{
  // Encode a reg-reg copy (or g0,rs2,rd). If it is useless, then empty encoding.
  if( LONG_HI_REG($rs2$$reg) != $rd$$reg )
    emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) );
%}

// Generic three-register arithmetic form; opcode fields come from the
// instruct's $primary (op3) and $secondary (op) attributes.
enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg );
%}

// Convert src to 0/1: subcc %g0,src sets the carry flag iff src != 0,
// then addc materializes the carry into dst.
enc_class enc_to_bool( iRegI src, iRegI dst ) %{
  emit3       ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg );
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 );
%}

// dst = (p < q) ? -1 : 0, built from subcc + annulled branch over the
// "-1" delay-slot move (branch displacement of 2 instructions).
enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{
  emit3       ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg );
  // clear if nothing else is happening
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 );
  // blt,a,pn done
  emit2_19    ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 );
  // mov dst,-1 in delay slot
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
%}

// 32-bit shift by a 5-bit immediate (count masked to 0..31).
enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
%}

// 64-bit shift by a 6-bit immediate; the 0x1000 bit is the x-field that
// selects the extended (64-bit) shift form.
enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
%}

// 64-bit shift by register; 0x80 sets the x-field for the 64-bit form.
enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
%}

// Generic reg-simm13-reg arithmetic form.
enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
%}

// O1 = O7 + pc_return_offset: materialize the return pc into O1.
enc_class move_return_pc_to_o1() %{
  emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
%}

#ifdef _LP64
/* %%% merge with enc_to_bool */
// Pointer-to-boolean: movr writes 1 to dst when src is non-zero.
// NOTE(review): dst is presumably pre-zeroed by the instruct's other
// encoding — confirm against the instruct that uses this class.
enc_class enc_convP2B( iRegI dst, iRegP src ) %{
  MacroAssembler _masm(&cbuf);

  Register src_reg = reg_to_register_object($src$$reg);
  Register dst_reg = reg_to_register_object($dst$$reg);
  __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
%}
#endif

enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
  // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
  MacroAssembler _masm(&cbuf);

  Register p_reg = reg_to_register_object($p$$reg);
  Register q_reg = reg_to_register_object($q$$reg);
  Register y_reg = reg_to_register_object($y$$reg);
  Register tmp_reg = reg_to_register_object($tmp$$reg);

  __ subcc( p_reg, q_reg, p_reg );
  __ add  ( p_reg, y_reg, tmp_reg );
  // p = (p < q) ? p-q+y : p-q
  __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg );
%}

// double -> int conversion. A NaN source first fails the fcmpd
// self-compare; the not-nan branch skips the two fixup instructions
// that zero the result (fitos then fsubs dst,dst,dst) for the NaN case.
enc_class form_d2i_helper(regD src, regF dst) %{
  // fcmp %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fdtoi $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg );
  // fitos $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
  // carry on here...
%}

// double -> long conversion; same NaN-fixup shape as form_d2i_helper
// but using fdtox/fxtod/fsubd.
enc_class form_d2l_helper(regD src, regD dst) %{
  // fcmp %fcc0,$src,$src  check for NAN
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fdtox $src,$dst  convert in delay slot
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg );
  // fxtod $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
  // carry on here...
%}

// float -> int conversion; NaN sources are zeroed via the same
// self-compare/branch/fixup pattern (fstoi/fitos/fsubs).
enc_class form_f2i_helper(regF src, regF dst) %{
  // fcmps %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fstoi $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg );
  // fitos $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
  // carry on here...
%}

// float -> long conversion; NaN sources are zeroed via the same
// self-compare/branch/fixup pattern (fstox/fxtod/fsubd).
enc_class form_f2l_helper(regF src, regD dst) %{
  // fcmps %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fstox $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg );
  // fxtod $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
  // carry on here...
%}

// Single-source FP-op forms; op3/op/opf come from $primary/$secondary/
// $tertiary.  The _lo variants address the odd half of a double pair
// by adding 1 to the register encoding.
enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}

enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %}

enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %}

// Two-source FP-op forms (e.g. fadd/fsub/fmul/fdiv).
enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, regD rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// FP compares targeting a %fcc condition-code register.
enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{
  emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{
  emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// int -> float conversion; the opf field comes from $secondary.
enc_class form3_convI2F(regF rs2, regF rd) %{
  emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg);
%}

// Encoding class for traceable jumps
enc_class form_jmpl(g3RegP dest) %{
  emit_jmpl(cbuf, $dest$$reg);
%}

enc_class form_jmpl_set_exception_pc(g1RegP dest) %{
  emit_jmpl_set_exception_pc(cbuf, $dest$$reg);
%}

enc_class form2_nop() %{
  emit_nop(cbuf);
%}

enc_class form2_illtrap() %{
  emit_illtrap(cbuf);
%}


// Compare longs and convert into -1, 0, 1.
// Three-way long compare: dst = (src1 < src2) ? -1 : (src1 > src2) ? 1 : 0.
// Uses subcc on xcc plus two annulled branches whose delay slots hold the
// -1 and 1 moves; fall-through clears dst.  Branch displacements (5 and 3)
// count instructions to the end of this sequence.
enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{
  // CMP $src1,$src2
  emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg );
  // blt,a,pn done
  emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 );
  // mov dst,-1 in delay slot
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
  // bgt,a,pn done
  emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 );
  // mov dst,1 in delay slot
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 );
  // CLR $dst
  emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 );
%}

// Call the partial-subtype-check stub; result convention is defined by
// the stub itself.
enc_class enc_PartialSubtypeCheck() %{
  MacroAssembler _masm(&cbuf);
  __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type);
  __ delayed()->nop();
%}

// Conditional branch on integer condition codes.  Backward branches are
// predicted taken (loops), forward branches not taken.
enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
  MacroAssembler _masm(&cbuf);
  Label* L = $labl$$label;
  Assembler::Predict predict_taken =
    cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

  __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
  __ delayed()->nop();
%}

// Branch on register contents (bpr), same prediction heuristic as enc_bp.
enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
  MacroAssembler _masm(&cbuf);
  Label* L = $labl$$label;
  Assembler::Predict predict_taken =
    cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

  __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
  __ delayed()->nop();
%}

// MOVcc dst,src on integer condition codes, hand-assembled.
enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (1 << 18) |               // cc2 bit for 'icc'
           ($cmp$$cmpcode << 14) |
           (0 << 13) |               // select register move
           ($pcc$$constant << 11) |  // cc1, cc0 bits for 'icc' or 'xcc'
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// MOVcc dst,simm11 on integer condition codes.
enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
  int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (1 << 18) |               // cc2 bit for 'icc'
           ($cmp$$cmpcode << 14) |
           (1 << 13) |               // select immediate move
           ($pcc$$constant << 11) |  // cc1, cc0 bits for 'icc'
           (simm11 << 0);
  cbuf.insts()->emit_int32(op);
%}

// MOVcc dst,src on floating-point condition codes (%fcc0-%fcc3).
enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (0 << 18) |               // cc2 bit for 'fccX'
           ($cmp$$cmpcode << 14) |
           (0 << 13) |               // select register move
           ($fcc$$reg << 11) |       // cc1, cc0 bits for fcc0-fcc3
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// MOVcc dst,simm11 on floating-point condition codes.
enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
  int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (0 << 18) |               // cc2 bit for 'fccX'
           ($cmp$$cmpcode << 14) |
           (1 << 13) |               // select immediate move
           ($fcc$$reg << 11) |       // cc1, cc0 bits for fcc0-fcc3
           (simm11 << 0);
  cbuf.insts()->emit_int32(op);
%}
2494 2495 enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{ 2496 int op = (Assembler::arith_op << 30) | 2497 ($dst$$reg << 25) | 2498 (Assembler::fpop2_op3 << 19) | 2499 (0 << 18) | 2500 ($cmp$$cmpcode << 14) | 2501 (1 << 13) | // select register move 2502 ($pcc$$constant << 11) | // cc1-cc0 bits for 'icc' or 'xcc' 2503 ($primary << 5) | // select single, double or quad 2504 ($src$$reg << 0); 2505 cbuf.insts()->emit_int32(op); 2506 %} 2507 2508 enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{ 2509 int op = (Assembler::arith_op << 30) | 2510 ($dst$$reg << 25) | 2511 (Assembler::fpop2_op3 << 19) | 2512 (0 << 18) | 2513 ($cmp$$cmpcode << 14) | 2514 ($fcc$$reg << 11) | // cc2-cc0 bits for 'fccX' 2515 ($primary << 5) | // select single, double or quad 2516 ($src$$reg << 0); 2517 cbuf.insts()->emit_int32(op); 2518 %} 2519 2520 // Used by the MIN/MAX encodings. Same as a CMOV, but 2521 // the condition comes from opcode-field instead of an argument. 
2522 enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{ 2523 int op = (Assembler::arith_op << 30) | 2524 ($dst$$reg << 25) | 2525 (Assembler::movcc_op3 << 19) | 2526 (1 << 18) | // cc2 bit for 'icc' 2527 ($primary << 14) | 2528 (0 << 13) | // select register move 2529 (0 << 11) | // cc1, cc0 bits for 'icc' 2530 ($src$$reg << 0); 2531 cbuf.insts()->emit_int32(op); 2532 %} 2533 2534 enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{ 2535 int op = (Assembler::arith_op << 30) | 2536 ($dst$$reg << 25) | 2537 (Assembler::movcc_op3 << 19) | 2538 (6 << 16) | // cc2 bit for 'xcc' 2539 ($primary << 14) | 2540 (0 << 13) | // select register move 2541 (0 << 11) | // cc1, cc0 bits for 'icc' 2542 ($src$$reg << 0); 2543 cbuf.insts()->emit_int32(op); 2544 %} 2545 2546 enc_class Set13( immI13 src, iRegI rd ) %{ 2547 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant ); 2548 %} 2549 2550 enc_class SetHi22( immI src, iRegI rd ) %{ 2551 emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant ); 2552 %} 2553 2554 enc_class Set32( immI src, iRegI rd ) %{ 2555 MacroAssembler _masm(&cbuf); 2556 __ set($src$$constant, reg_to_register_object($rd$$reg)); 2557 %} 2558 2559 enc_class call_epilog %{ 2560 if( VerifyStackAtCalls ) { 2561 MacroAssembler _masm(&cbuf); 2562 int framesize = ra_->C->frame_size_in_bytes(); 2563 Register temp_reg = G3; 2564 __ add(SP, framesize, temp_reg); 2565 __ cmp(temp_reg, FP); 2566 __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc); 2567 } 2568 %} 2569 2570 // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value 2571 // to G1 so the register allocator will not have to deal with the misaligned register 2572 // pair. 
2573 enc_class adjust_long_from_native_call %{ 2574 #ifndef _LP64 2575 if (returns_long()) { 2576 // sllx O0,32,O0 2577 emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 ); 2578 // srl O1,0,O1 2579 emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 ); 2580 // or O0,O1,G1 2581 emit3 ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc ); 2582 } 2583 #endif 2584 %} 2585 2586 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime 2587 // CALL directly to the runtime 2588 // The user of this is responsible for ensuring that R_L7 is empty (killed). 2589 emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type, 2590 /*preserve_g2=*/true); 2591 %} 2592 2593 enc_class preserve_SP %{ 2594 MacroAssembler _masm(&cbuf); 2595 __ mov(SP, L7_mh_SP_save); 2596 %} 2597 2598 enc_class restore_SP %{ 2599 MacroAssembler _masm(&cbuf); 2600 __ mov(L7_mh_SP_save, SP); 2601 %} 2602 2603 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL 2604 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 2605 // who we intended to call. 2606 if (!_method) { 2607 emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type); 2608 } else if (_optimized_virtual) { 2609 emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type); 2610 } else { 2611 emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type); 2612 } 2613 if (_method) { // Emit stub for static call. 
2614 address stub = CompiledStaticCall::emit_to_interp_stub(cbuf); 2615 // Stub does not fit into scratch buffer if TraceJumps is enabled 2616 if (stub == NULL && !(TraceJumps && Compile::current()->in_scratch_emit_size())) { 2617 ciEnv::current()->record_failure("CodeCache is full"); 2618 return; 2619 } 2620 } 2621 %} 2622 2623 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL 2624 MacroAssembler _masm(&cbuf); 2625 __ set_inst_mark(); 2626 int vtable_index = this->_vtable_index; 2627 // MachCallDynamicJavaNode::ret_addr_offset uses this same test 2628 if (vtable_index < 0) { 2629 // must be invalid_vtable_index, not nonvirtual_vtable_index 2630 assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value"); 2631 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode()); 2632 assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()"); 2633 assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub"); 2634 __ ic_call((address)$meth$$method); 2635 } else { 2636 assert(!UseInlineCaches, "expect vtable calls only if not using ICs"); 2637 // Just go thru the vtable 2638 // get receiver klass (receiver already checked for non-null) 2639 // If we end up going thru a c2i adapter interpreter expects method in G5 2640 int off = __ offset(); 2641 __ load_klass(O0, G3_scratch); 2642 int klass_load_size; 2643 if (UseCompressedClassPointers) { 2644 assert(Universe::heap() != NULL, "java heap should be initialized"); 2645 klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord; 2646 } else { 2647 klass_load_size = 1*BytesPerInstWord; 2648 } 2649 int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); 2650 int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); 2651 if (Assembler::is_simm13(v_off)) { 2652 __ ld_ptr(G3, v_off, G5_method); 2653 } else { 2654 // 
Generate 2 instructions 2655 __ Assembler::sethi(v_off & ~0x3ff, G5_method); 2656 __ or3(G5_method, v_off & 0x3ff, G5_method); 2657 // ld_ptr, set_hi, set 2658 assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord, 2659 "Unexpected instruction size(s)"); 2660 __ ld_ptr(G3, G5_method, G5_method); 2661 } 2662 // NOTE: for vtable dispatches, the vtable entry will never be null. 2663 // However it may very well end up in handle_wrong_method if the 2664 // method is abstract for the particular class. 2665 __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch); 2666 // jump to target (either compiled code or c2iadapter) 2667 __ jmpl(G3_scratch, G0, O7); 2668 __ delayed()->nop(); 2669 } 2670 %} 2671 2672 enc_class Java_Compiled_Call (method meth) %{ // JAVA COMPILED CALL 2673 MacroAssembler _masm(&cbuf); 2674 2675 Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode()); 2676 Register temp_reg = G3; // caller must kill G3! We cannot reuse G5_ic_reg here because 2677 // we might be calling a C2I adapter which needs it. 
2678 2679 assert(temp_reg != G5_ic_reg, "conflicting registers"); 2680 // Load nmethod 2681 __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg); 2682 2683 // CALL to compiled java, indirect the contents of G3 2684 __ set_inst_mark(); 2685 __ callr(temp_reg, G0); 2686 __ delayed()->nop(); 2687 %} 2688 2689 enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{ 2690 MacroAssembler _masm(&cbuf); 2691 Register Rdividend = reg_to_register_object($src1$$reg); 2692 Register Rdivisor = reg_to_register_object($src2$$reg); 2693 Register Rresult = reg_to_register_object($dst$$reg); 2694 2695 __ sra(Rdivisor, 0, Rdivisor); 2696 __ sra(Rdividend, 0, Rdividend); 2697 __ sdivx(Rdividend, Rdivisor, Rresult); 2698 %} 2699 2700 enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{ 2701 MacroAssembler _masm(&cbuf); 2702 2703 Register Rdividend = reg_to_register_object($src1$$reg); 2704 int divisor = $imm$$constant; 2705 Register Rresult = reg_to_register_object($dst$$reg); 2706 2707 __ sra(Rdividend, 0, Rdividend); 2708 __ sdivx(Rdividend, divisor, Rresult); 2709 %} 2710 2711 enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{ 2712 MacroAssembler _masm(&cbuf); 2713 Register Rsrc1 = reg_to_register_object($src1$$reg); 2714 Register Rsrc2 = reg_to_register_object($src2$$reg); 2715 Register Rdst = reg_to_register_object($dst$$reg); 2716 2717 __ sra( Rsrc1, 0, Rsrc1 ); 2718 __ sra( Rsrc2, 0, Rsrc2 ); 2719 __ mulx( Rsrc1, Rsrc2, Rdst ); 2720 __ srlx( Rdst, 32, Rdst ); 2721 %} 2722 2723 enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{ 2724 MacroAssembler _masm(&cbuf); 2725 Register Rdividend = reg_to_register_object($src1$$reg); 2726 Register Rdivisor = reg_to_register_object($src2$$reg); 2727 Register Rresult = reg_to_register_object($dst$$reg); 2728 Register Rscratch = reg_to_register_object($scratch$$reg); 2729 2730 assert(Rdividend != Rscratch, ""); 2731 assert(Rdivisor != Rscratch, 
""); 2732 2733 __ sra(Rdividend, 0, Rdividend); 2734 __ sra(Rdivisor, 0, Rdivisor); 2735 __ sdivx(Rdividend, Rdivisor, Rscratch); 2736 __ mulx(Rscratch, Rdivisor, Rscratch); 2737 __ sub(Rdividend, Rscratch, Rresult); 2738 %} 2739 2740 enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{ 2741 MacroAssembler _masm(&cbuf); 2742 2743 Register Rdividend = reg_to_register_object($src1$$reg); 2744 int divisor = $imm$$constant; 2745 Register Rresult = reg_to_register_object($dst$$reg); 2746 Register Rscratch = reg_to_register_object($scratch$$reg); 2747 2748 assert(Rdividend != Rscratch, ""); 2749 2750 __ sra(Rdividend, 0, Rdividend); 2751 __ sdivx(Rdividend, divisor, Rscratch); 2752 __ mulx(Rscratch, divisor, Rscratch); 2753 __ sub(Rdividend, Rscratch, Rresult); 2754 %} 2755 2756 enc_class fabss (sflt_reg dst, sflt_reg src) %{ 2757 MacroAssembler _masm(&cbuf); 2758 2759 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg); 2760 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg); 2761 2762 __ fabs(FloatRegisterImpl::S, Fsrc, Fdst); 2763 %} 2764 2765 enc_class fabsd (dflt_reg dst, dflt_reg src) %{ 2766 MacroAssembler _masm(&cbuf); 2767 2768 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2769 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2770 2771 __ fabs(FloatRegisterImpl::D, Fsrc, Fdst); 2772 %} 2773 2774 enc_class fnegd (dflt_reg dst, dflt_reg src) %{ 2775 MacroAssembler _masm(&cbuf); 2776 2777 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2778 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2779 2780 __ fneg(FloatRegisterImpl::D, Fsrc, Fdst); 2781 %} 2782 2783 enc_class fsqrts (sflt_reg dst, sflt_reg src) %{ 2784 MacroAssembler _masm(&cbuf); 2785 2786 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg); 2787 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg); 2788 2789 __ fsqrt(FloatRegisterImpl::S, Fsrc, 
Fdst); 2790 %} 2791 2792 enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{ 2793 MacroAssembler _masm(&cbuf); 2794 2795 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2796 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2797 2798 __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst); 2799 %} 2800 2801 enc_class fmovs (dflt_reg dst, dflt_reg src) %{ 2802 MacroAssembler _masm(&cbuf); 2803 2804 FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg); 2805 FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg); 2806 2807 __ fmov(FloatRegisterImpl::S, Fsrc, Fdst); 2808 %} 2809 2810 enc_class fmovd (dflt_reg dst, dflt_reg src) %{ 2811 MacroAssembler _masm(&cbuf); 2812 2813 FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg); 2814 FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg); 2815 2816 __ fmov(FloatRegisterImpl::D, Fsrc, Fdst); 2817 %} 2818 2819 enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{ 2820 MacroAssembler _masm(&cbuf); 2821 2822 Register Roop = reg_to_register_object($oop$$reg); 2823 Register Rbox = reg_to_register_object($box$$reg); 2824 Register Rscratch = reg_to_register_object($scratch$$reg); 2825 Register Rmark = reg_to_register_object($scratch2$$reg); 2826 2827 assert(Roop != Rscratch, ""); 2828 assert(Roop != Rmark, ""); 2829 assert(Rbox != Rscratch, ""); 2830 assert(Rbox != Rmark, ""); 2831 2832 __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining); 2833 %} 2834 2835 enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{ 2836 MacroAssembler _masm(&cbuf); 2837 2838 Register Roop = reg_to_register_object($oop$$reg); 2839 Register Rbox = reg_to_register_object($box$$reg); 2840 Register Rscratch = reg_to_register_object($scratch$$reg); 2841 Register Rmark = reg_to_register_object($scratch2$$reg); 2842 2843 assert(Roop != Rscratch, ""); 2844 assert(Roop != Rmark, ""); 2845 
assert(Rbox != Rscratch, ""); 2846 assert(Rbox != Rmark, ""); 2847 2848 __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining); 2849 %} 2850 2851 enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{ 2852 MacroAssembler _masm(&cbuf); 2853 Register Rmem = reg_to_register_object($mem$$reg); 2854 Register Rold = reg_to_register_object($old$$reg); 2855 Register Rnew = reg_to_register_object($new$$reg); 2856 2857 __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold 2858 __ cmp( Rold, Rnew ); 2859 %} 2860 2861 enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{ 2862 Register Rmem = reg_to_register_object($mem$$reg); 2863 Register Rold = reg_to_register_object($old$$reg); 2864 Register Rnew = reg_to_register_object($new$$reg); 2865 2866 MacroAssembler _masm(&cbuf); 2867 __ mov(Rnew, O7); 2868 __ casx(Rmem, Rold, O7); 2869 __ cmp( Rold, O7 ); 2870 %} 2871 2872 // raw int cas, used for compareAndSwap 2873 enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{ 2874 Register Rmem = reg_to_register_object($mem$$reg); 2875 Register Rold = reg_to_register_object($old$$reg); 2876 Register Rnew = reg_to_register_object($new$$reg); 2877 2878 MacroAssembler _masm(&cbuf); 2879 __ mov(Rnew, O7); 2880 __ cas(Rmem, Rold, O7); 2881 __ cmp( Rold, O7 ); 2882 %} 2883 2884 enc_class enc_lflags_ne_to_boolean( iRegI res ) %{ 2885 Register Rres = reg_to_register_object($res$$reg); 2886 2887 MacroAssembler _masm(&cbuf); 2888 __ mov(1, Rres); 2889 __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres ); 2890 %} 2891 2892 enc_class enc_iflags_ne_to_boolean( iRegI res ) %{ 2893 Register Rres = reg_to_register_object($res$$reg); 2894 2895 MacroAssembler _masm(&cbuf); 2896 __ mov(1, Rres); 2897 __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres ); 2898 %} 2899 2900 enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{ 2901 MacroAssembler _masm(&cbuf); 2902 Register Rdst = reg_to_register_object($dst$$reg); 
2903 FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg) 2904 : reg_to_DoubleFloatRegister_object($src1$$reg); 2905 FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg) 2906 : reg_to_DoubleFloatRegister_object($src2$$reg); 2907 2908 // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1) 2909 __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst); 2910 %} 2911 2912 2913 enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{ 2914 Label Ldone, Lloop; 2915 MacroAssembler _masm(&cbuf); 2916 2917 Register str1_reg = reg_to_register_object($str1$$reg); 2918 Register str2_reg = reg_to_register_object($str2$$reg); 2919 Register cnt1_reg = reg_to_register_object($cnt1$$reg); 2920 Register cnt2_reg = reg_to_register_object($cnt2$$reg); 2921 Register result_reg = reg_to_register_object($result$$reg); 2922 2923 assert(result_reg != str1_reg && 2924 result_reg != str2_reg && 2925 result_reg != cnt1_reg && 2926 result_reg != cnt2_reg , 2927 "need different registers"); 2928 2929 // Compute the minimum of the string lengths(str1_reg) and the 2930 // difference of the string lengths (stack) 2931 2932 // See if the lengths are different, and calculate min in str1_reg. 2933 // Stash diff in O7 in case we need it for a tie-breaker. 
2934 Label Lskip; 2935 __ subcc(cnt1_reg, cnt2_reg, O7); 2936 __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit 2937 __ br(Assembler::greater, true, Assembler::pt, Lskip); 2938 // cnt2 is shorter, so use its count: 2939 __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit 2940 __ bind(Lskip); 2941 2942 // reallocate cnt1_reg, cnt2_reg, result_reg 2943 // Note: limit_reg holds the string length pre-scaled by 2 2944 Register limit_reg = cnt1_reg; 2945 Register chr2_reg = cnt2_reg; 2946 Register chr1_reg = result_reg; 2947 // str{12} are the base pointers 2948 2949 // Is the minimum length zero? 2950 __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity 2951 __ br(Assembler::equal, true, Assembler::pn, Ldone); 2952 __ delayed()->mov(O7, result_reg); // result is difference in lengths 2953 2954 // Load first characters 2955 __ lduh(str1_reg, 0, chr1_reg); 2956 __ lduh(str2_reg, 0, chr2_reg); 2957 2958 // Compare first characters 2959 __ subcc(chr1_reg, chr2_reg, chr1_reg); 2960 __ br(Assembler::notZero, false, Assembler::pt, Ldone); 2961 assert(chr1_reg == result_reg, "result must be pre-placed"); 2962 __ delayed()->nop(); 2963 2964 { 2965 // Check after comparing first character to see if strings are equivalent 2966 Label LSkip2; 2967 // Check if the strings start at same location 2968 __ cmp(str1_reg, str2_reg); 2969 __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2); 2970 __ delayed()->nop(); 2971 2972 // Check if the length difference is zero (in O7) 2973 __ cmp(G0, O7); 2974 __ br(Assembler::equal, true, Assembler::pn, Ldone); 2975 __ delayed()->mov(G0, result_reg); // result is zero 2976 2977 // Strings might not be equal 2978 __ bind(LSkip2); 2979 } 2980 2981 // We have no guarantee that on 64 bit the higher half of limit_reg is 0 2982 __ signx(limit_reg); 2983 2984 __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg); 2985 __ br(Assembler::equal, true, Assembler::pn, 
Ldone); 2986 __ delayed()->mov(O7, result_reg); // result is difference in lengths 2987 2988 // Shift str1_reg and str2_reg to the end of the arrays, negate limit 2989 __ add(str1_reg, limit_reg, str1_reg); 2990 __ add(str2_reg, limit_reg, str2_reg); 2991 __ neg(chr1_reg, limit_reg); // limit = -(limit-2) 2992 2993 // Compare the rest of the characters 2994 __ lduh(str1_reg, limit_reg, chr1_reg); 2995 __ bind(Lloop); 2996 // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted 2997 __ lduh(str2_reg, limit_reg, chr2_reg); 2998 __ subcc(chr1_reg, chr2_reg, chr1_reg); 2999 __ br(Assembler::notZero, false, Assembler::pt, Ldone); 3000 assert(chr1_reg == result_reg, "result must be pre-placed"); 3001 __ delayed()->inccc(limit_reg, sizeof(jchar)); 3002 // annul LDUH if branch is not taken to prevent access past end of string 3003 __ br(Assembler::notZero, true, Assembler::pt, Lloop); 3004 __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted 3005 3006 // If strings are equal up to min length, return the length difference. 3007 __ mov(O7, result_reg); 3008 3009 // Otherwise, return the difference between the first mismatched chars. 3010 __ bind(Ldone); 3011 %} 3012 3013 enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{ 3014 Label Lchar, Lchar_loop, Ldone; 3015 MacroAssembler _masm(&cbuf); 3016 3017 Register str1_reg = reg_to_register_object($str1$$reg); 3018 Register str2_reg = reg_to_register_object($str2$$reg); 3019 Register cnt_reg = reg_to_register_object($cnt$$reg); 3020 Register tmp1_reg = O7; 3021 Register result_reg = reg_to_register_object($result$$reg); 3022 3023 assert(result_reg != str1_reg && 3024 result_reg != str2_reg && 3025 result_reg != cnt_reg && 3026 result_reg != tmp1_reg , 3027 "need different registers"); 3028 3029 __ cmp(str1_reg, str2_reg); //same char[] ? 
3030 __ brx(Assembler::equal, true, Assembler::pn, Ldone); 3031 __ delayed()->add(G0, 1, result_reg); 3032 3033 __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn); 3034 __ delayed()->add(G0, 1, result_reg); // count == 0 3035 3036 //rename registers 3037 Register limit_reg = cnt_reg; 3038 Register chr1_reg = result_reg; 3039 Register chr2_reg = tmp1_reg; 3040 3041 // We have no guarantee that on 64 bit the higher half of limit_reg is 0 3042 __ signx(limit_reg); 3043 3044 //check for alignment and position the pointers to the ends 3045 __ or3(str1_reg, str2_reg, chr1_reg); 3046 __ andcc(chr1_reg, 0x3, chr1_reg); 3047 // notZero means at least one not 4-byte aligned. 3048 // We could optimize the case when both arrays are not aligned 3049 // but it is not frequent case and it requires additional checks. 3050 __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare 3051 __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count 3052 3053 // Compare char[] arrays aligned to 4 bytes. 
3054 __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg, 3055 chr1_reg, chr2_reg, Ldone); 3056 __ ba(Ldone); 3057 __ delayed()->add(G0, 1, result_reg); 3058 3059 // char by char compare 3060 __ bind(Lchar); 3061 __ add(str1_reg, limit_reg, str1_reg); 3062 __ add(str2_reg, limit_reg, str2_reg); 3063 __ neg(limit_reg); //negate count 3064 3065 __ lduh(str1_reg, limit_reg, chr1_reg); 3066 // Lchar_loop 3067 __ bind(Lchar_loop); 3068 __ lduh(str2_reg, limit_reg, chr2_reg); 3069 __ cmp(chr1_reg, chr2_reg); 3070 __ br(Assembler::notEqual, true, Assembler::pt, Ldone); 3071 __ delayed()->mov(G0, result_reg); //not equal 3072 __ inccc(limit_reg, sizeof(jchar)); 3073 // annul LDUH if branch is not taken to prevent access past end of string 3074 __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop); 3075 __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted 3076 3077 __ add(G0, 1, result_reg); //equal 3078 3079 __ bind(Ldone); 3080 %} 3081 3082 enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{ 3083 Label Lvector, Ldone, Lloop; 3084 MacroAssembler _masm(&cbuf); 3085 3086 Register ary1_reg = reg_to_register_object($ary1$$reg); 3087 Register ary2_reg = reg_to_register_object($ary2$$reg); 3088 Register tmp1_reg = reg_to_register_object($tmp1$$reg); 3089 Register tmp2_reg = O7; 3090 Register result_reg = reg_to_register_object($result$$reg); 3091 3092 int length_offset = arrayOopDesc::length_offset_in_bytes(); 3093 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR); 3094 3095 // return true if the same array 3096 __ cmp(ary1_reg, ary2_reg); 3097 __ brx(Assembler::equal, true, Assembler::pn, Ldone); 3098 __ delayed()->add(G0, 1, result_reg); // equal 3099 3100 __ br_null(ary1_reg, true, Assembler::pn, Ldone); 3101 __ delayed()->mov(G0, result_reg); // not equal 3102 3103 __ br_null(ary2_reg, true, Assembler::pn, Ldone); 3104 __ delayed()->mov(G0, result_reg); // not equal 3105 3106 //load the lengths of 
arrays 3107 __ ld(Address(ary1_reg, length_offset), tmp1_reg); 3108 __ ld(Address(ary2_reg, length_offset), tmp2_reg); 3109 3110 // return false if the two arrays are not equal length 3111 __ cmp(tmp1_reg, tmp2_reg); 3112 __ br(Assembler::notEqual, true, Assembler::pn, Ldone); 3113 __ delayed()->mov(G0, result_reg); // not equal 3114 3115 __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn); 3116 __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal 3117 3118 // load array addresses 3119 __ add(ary1_reg, base_offset, ary1_reg); 3120 __ add(ary2_reg, base_offset, ary2_reg); 3121 3122 // renaming registers 3123 Register chr1_reg = result_reg; // for characters in ary1 3124 Register chr2_reg = tmp2_reg; // for characters in ary2 3125 Register limit_reg = tmp1_reg; // length 3126 3127 // set byte count 3128 __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); 3129 3130 // Compare char[] arrays aligned to 4 bytes. 3131 __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg, 3132 chr1_reg, chr2_reg, Ldone); 3133 __ add(G0, 1, result_reg); // equals 3134 3135 __ bind(Ldone); 3136 %} 3137 3138 enc_class enc_rethrow() %{ 3139 cbuf.set_insts_mark(); 3140 Register temp_reg = G3; 3141 AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub()); 3142 assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg"); 3143 MacroAssembler _masm(&cbuf); 3144 #ifdef ASSERT 3145 __ save_frame(0); 3146 AddressLiteral last_rethrow_addrlit(&last_rethrow); 3147 __ sethi(last_rethrow_addrlit, L1); 3148 Address addr(L1, last_rethrow_addrlit.low10()); 3149 __ rdpc(L2); 3150 __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to 3151 __ st_ptr(L2, addr); 3152 __ restore(); 3153 #endif 3154 __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp 3155 __ delayed()->nop(); 3156 %} 3157 3158 enc_class emit_mem_nop() %{ 3159 // Generates the instruction LDUXA [o6,g0],#0x82,g0 3160 
    cbuf.insts()->emit_int32((unsigned int) 0xc0839040);
  %}

  // FP-pipeline nop: emits FMOVS f31,f31, a self-move with no architectural
  // effect, used to pad the floating-point add pipeline.
  enc_class emit_fadd_nop() %{
    // Generates the instruction FMOVS f31,f31
    cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f);
  %}

  // Branch-pipeline nop: emits BPN,PN (branch never, predicted not taken).
  enc_class emit_br_nop() %{
    // Generates the instruction BPN,PN .
    cbuf.insts()->emit_int32((unsigned int) 0x00400000);
  %}

  // Acquire barrier: MEMBAR with LoadStore|LoadLoad — completed loads are
  // ordered before any subsequent load or store.
  enc_class enc_membar_acquire %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) );
  %}

  // Release barrier: MEMBAR with LoadStore|StoreStore — prior loads and
  // stores are ordered before any subsequent store.
  enc_class enc_membar_release %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) );
  %}

  // Volatile (full) barrier: MEMBAR with StoreLoad only.
  // NOTE(review): presumably the other three orderings are already implicit
  // in the SPARC memory model used here, so StoreLoad alone suffices — confirm.
  enc_class enc_membar_volatile %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
  %}

%}

//----------FRAME--------------------------------------------------------------
// Definition of frame structure and management information.
//
//  S T A C K   L A Y O U T    Allocators stack-slot number
//                             |   (to get allocators register number
//  G  Owned by    |        |  v    add VMRegImpl::stack0)
//  r   CALLER     |        |
//  o     |        +--------+      pad to even-align allocators stack-slot
//  w     V        |  pad0  |        numbers; owned by CALLER
//  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
//  h     ^        |   in   |  5
//        |        |  args  |  4   Holes in incoming args owned by SELF
//        |        |        |  3
//        |        +--------+
//        V        | old out|      Empty on Intel, window on Sparc
//        |    old |preserve|      Must be even aligned.
//        |     SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
//        |        |   in   |  3   area for Intel ret address
//     Owned by    |preserve|      Empty on Sparc.
//      SELF      +--------+
//        |       |  pad2  |  2   pad to align old SP
//        |       +--------+  1
//        |       | locks  |  0
//        |       +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
//        |       |  pad1  | 11   pad to align new SP
//        |       +--------+
//        |       |        | 10
//        |       | spills |  9   spills
//        V       |        |  8   (pad0 slot for callee)
//      -----------+--------+----> Matcher::_out_arg_limit, unaligned
//        ^        |  out   |  7
//        |        |  args  |  6   Holes in outgoing args owned by CALLEE
//   Owned by      +--------+
//    CALLEE       | new out|  6   Empty on Intel, window on Sparc
//        |    new |preserve|      Must be even-aligned.
//        |     SP-+--------+----> Matcher::_new_SP, even aligned
//        |        |        |
//
// Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
//         known from SELF's arguments and the Java calling convention.
//         Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
// Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
//         even aligned with pad0 as needed.
//         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
//         region 6-11 is even aligned; it may be padded out more so that
//         the region from SP to FP meets the minimum stack alignment.

frame %{
  // What direction does stack grow in (assumed to be same for native & Java)
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
3250 inline_cache_reg(R_G5); // Inline Cache Register or Method* for I2C 3251 interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter 3252 3253 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset] 3254 cisc_spilling_operand_name(indOffset); 3255 3256 // Number of stack slots consumed by a Monitor enter 3257 #ifdef _LP64 3258 sync_stack_slots(2); 3259 #else 3260 sync_stack_slots(1); 3261 #endif 3262 3263 // Compiled code's Frame Pointer 3264 frame_pointer(R_SP); 3265 3266 // Stack alignment requirement 3267 stack_alignment(StackAlignmentInBytes); 3268 // LP64: Alignment size in bytes (128-bit -> 16 bytes) 3269 // !LP64: Alignment size in bytes (64-bit -> 8 bytes) 3270 3271 // Number of stack slots between incoming argument block and the start of 3272 // a new frame. The PROLOG must add this many slots to the stack. The 3273 // EPILOG must remove this many slots. 3274 in_preserve_stack_slots(0); 3275 3276 // Number of outgoing stack slots killed above the out_preserve_stack_slots 3277 // for calls to C. Supports the var-args backing area for register parms. 3278 // ADLC doesn't support parsing expressions, so I folded the math by hand. 3279 #ifdef _LP64 3280 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word 3281 varargs_C_out_slots_killed(12); 3282 #else 3283 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word 3284 varargs_C_out_slots_killed( 7); 3285 #endif 3286 3287 // The after-PROLOG location of the return address. Location of 3288 // return address specifies a type (REG or STACK) and a number 3289 // representing the register number (i.e. - use a register name) or 3290 // stack slot. 
3291 return_addr(REG R_I7); // Ret Addr is in register I7 3292 3293 // Body of function which returns an OptoRegs array locating 3294 // arguments either in registers or in stack slots for calling 3295 // java 3296 calling_convention %{ 3297 (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing); 3298 3299 %} 3300 3301 // Body of function which returns an OptoRegs array locating 3302 // arguments either in registers or in stack slots for calling 3303 // C. 3304 c_calling_convention %{ 3305 // This is obviously always outgoing 3306 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length); 3307 %} 3308 3309 // Location of native (C/C++) and interpreter return values. This is specified to 3310 // be the same as Java. In the 32-bit VM, long values are actually returned from 3311 // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying 3312 // to and from the register pairs is done by the appropriate call and epilog 3313 // opcodes. This simplifies the register allocator. 
  // Location of native (C/C++) return values, as an OptoRegPair (hi, lo),
  // indexed by ideal register type (Op_RegI .. Op_RegL).
  // The 'out' tables use the caller-side O registers, the 'in' tables the
  // callee-side I registers (SPARC register window); is_outgoing selects
  // between them.  OptoReg::Bad marks types with no valid return location
  // (or no high half).
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef _LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num};
#else // !_LP64
    // 32-bit: longs are returned in G1 (G1H:G1) rather than an O/I pair.
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
#endif
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

  // Location of compiled Java return values.
Same as C 3332 return_value %{ 3333 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); 3334 #ifdef _LP64 3335 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; 3336 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; 3337 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; 3338 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; 3339 #else // !_LP64 3340 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; 3341 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; 3342 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; 3343 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; 3344 #endif 3345 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], 3346 (is_outgoing?lo_out:lo_in)[ideal_reg] ); 3347 %} 3348 3349 %} 3350 3351 3352 //----------ATTRIBUTES--------------------------------------------------------- 3353 //----------Operand Attributes------------------------------------------------- 3354 op_attrib op_cost(1); // Required cost attribute 3355 3356 //----------Instruction Attributes--------------------------------------------- 3357 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute 3358 ins_attrib ins_size(32); // Required size attribute (in bits) 3359 3360 // avoid_back_to_back attribute is an expression that must return 3361 // one of the following values defined in MachNode: 3362 // AVOID_NONE - 
instruction can be placed anywhere 3363 // AVOID_BEFORE - instruction cannot be placed after an 3364 // instruction with MachNode::AVOID_AFTER 3365 // AVOID_AFTER - the next instruction cannot be the one 3366 // with MachNode::AVOID_BEFORE 3367 // AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at 3368 // the same time 3369 ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE); 3370 3371 ins_attrib ins_short_branch(0); // Required flag: is this instruction a 3372 // non-matching short branch variant of some 3373 // long branch? 3374 3375 //----------OPERANDS----------------------------------------------------------- 3376 // Operand definitions must precede instruction definitions for correct parsing 3377 // in the ADLC because operands constitute user defined types which are used in 3378 // instruction definitions. 3379 3380 //----------Simple Operands---------------------------------------------------- 3381 // Immediate Operands 3382 // Integer Immediate: 32-bit 3383 operand immI() %{ 3384 match(ConI); 3385 3386 op_cost(0); 3387 // formats are generated automatically for constants and base registers 3388 format %{ %} 3389 interface(CONST_INTER); 3390 %} 3391 3392 // Integer Immediate: 0-bit 3393 operand immI0() %{ 3394 predicate(n->get_int() == 0); 3395 match(ConI); 3396 op_cost(0); 3397 3398 format %{ %} 3399 interface(CONST_INTER); 3400 %} 3401 3402 // Integer Immediate: 5-bit 3403 operand immI5() %{ 3404 predicate(Assembler::is_simm5(n->get_int())); 3405 match(ConI); 3406 op_cost(0); 3407 format %{ %} 3408 interface(CONST_INTER); 3409 %} 3410 3411 // Integer Immediate: 8-bit 3412 operand immI8() %{ 3413 predicate(Assembler::is_simm8(n->get_int())); 3414 match(ConI); 3415 op_cost(0); 3416 format %{ %} 3417 interface(CONST_INTER); 3418 %} 3419 3420 // Integer Immediate: the value 10 3421 operand immI10() %{ 3422 predicate(n->get_int() == 10); 3423 match(ConI); 3424 op_cost(0); 3425 3426 format %{ %} 3427 interface(CONST_INTER); 3428 %} 3429 3430 // Integer 
Immediate: 11-bit 3431 operand immI11() %{ 3432 predicate(Assembler::is_simm11(n->get_int())); 3433 match(ConI); 3434 op_cost(0); 3435 format %{ %} 3436 interface(CONST_INTER); 3437 %} 3438 3439 // Integer Immediate: 13-bit 3440 operand immI13() %{ 3441 predicate(Assembler::is_simm13(n->get_int())); 3442 match(ConI); 3443 op_cost(0); 3444 3445 format %{ %} 3446 interface(CONST_INTER); 3447 %} 3448 3449 // Integer Immediate: 13-bit minus 7 3450 operand immI13m7() %{ 3451 predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095)); 3452 match(ConI); 3453 op_cost(0); 3454 3455 format %{ %} 3456 interface(CONST_INTER); 3457 %} 3458 3459 // Integer Immediate: 16-bit 3460 operand immI16() %{ 3461 predicate(Assembler::is_simm16(n->get_int())); 3462 match(ConI); 3463 op_cost(0); 3464 format %{ %} 3465 interface(CONST_INTER); 3466 %} 3467 3468 // Integer Immediate: the values 1-31 3469 operand immI_1_31() %{ 3470 predicate(n->get_int() >= 1 && n->get_int() <= 31); 3471 match(ConI); 3472 op_cost(0); 3473 3474 format %{ %} 3475 interface(CONST_INTER); 3476 %} 3477 3478 // Integer Immediate: the values 32-63 3479 operand immI_32_63() %{ 3480 predicate(n->get_int() >= 32 && n->get_int() <= 63); 3481 match(ConI); 3482 op_cost(0); 3483 3484 format %{ %} 3485 interface(CONST_INTER); 3486 %} 3487 3488 // Immediates for special shifts (sign extend) 3489 3490 // Integer Immediate: the value 16 3491 operand immI_16() %{ 3492 predicate(n->get_int() == 16); 3493 match(ConI); 3494 op_cost(0); 3495 3496 format %{ %} 3497 interface(CONST_INTER); 3498 %} 3499 3500 // Integer Immediate: the value 24 3501 operand immI_24() %{ 3502 predicate(n->get_int() == 24); 3503 match(ConI); 3504 op_cost(0); 3505 3506 format %{ %} 3507 interface(CONST_INTER); 3508 %} 3509 // Integer Immediate: the value 255 3510 operand immI_255() %{ 3511 predicate( n->get_int() == 255 ); 3512 match(ConI); 3513 op_cost(0); 3514 3515 format %{ %} 3516 interface(CONST_INTER); 3517 %} 3518 3519 // Integer Immediate: 
the value 65535 3520 operand immI_65535() %{ 3521 predicate(n->get_int() == 65535); 3522 match(ConI); 3523 op_cost(0); 3524 3525 format %{ %} 3526 interface(CONST_INTER); 3527 %} 3528 3529 // Integer Immediate: the values 0-31 3530 operand immU5() %{ 3531 predicate(n->get_int() >= 0 && n->get_int() <= 31); 3532 match(ConI); 3533 op_cost(0); 3534 3535 format %{ %} 3536 interface(CONST_INTER); 3537 %} 3538 3539 // Integer Immediate: 6-bit 3540 operand immU6() %{ 3541 predicate(n->get_int() >= 0 && n->get_int() <= 63); 3542 match(ConI); 3543 op_cost(0); 3544 format %{ %} 3545 interface(CONST_INTER); 3546 %} 3547 3548 // Unsigned Integer Immediate: 12-bit (non-negative that fits in simm13) 3549 operand immU12() %{ 3550 predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int())); 3551 match(ConI); 3552 op_cost(0); 3553 3554 format %{ %} 3555 interface(CONST_INTER); 3556 %} 3557 3558 // Integer Immediate non-negative 3559 operand immU31() 3560 %{ 3561 predicate(n->get_int() >= 0); 3562 match(ConI); 3563 3564 op_cost(0); 3565 format %{ %} 3566 interface(CONST_INTER); 3567 %} 3568 3569 // Long Immediate: the value FF 3570 operand immL_FF() %{ 3571 predicate( n->get_long() == 0xFFL ); 3572 match(ConL); 3573 op_cost(0); 3574 3575 format %{ %} 3576 interface(CONST_INTER); 3577 %} 3578 3579 // Long Immediate: the value FFFF 3580 operand immL_FFFF() %{ 3581 predicate( n->get_long() == 0xFFFFL ); 3582 match(ConL); 3583 op_cost(0); 3584 3585 format %{ %} 3586 interface(CONST_INTER); 3587 %} 3588 3589 // Pointer Immediate: 32 or 64-bit 3590 operand immP() %{ 3591 match(ConP); 3592 3593 op_cost(5); 3594 // formats are generated automatically for constants and base registers 3595 format %{ %} 3596 interface(CONST_INTER); 3597 %} 3598 3599 #ifdef _LP64 3600 // Pointer Immediate: 64-bit 3601 operand immP_set() %{ 3602 predicate(!VM_Version::is_niagara_plus()); 3603 match(ConP); 3604 3605 op_cost(5); 3606 // formats are generated automatically for constants and base registers 
3607 format %{ %} 3608 interface(CONST_INTER); 3609 %} 3610 3611 // Pointer Immediate: 64-bit 3612 // From Niagara2 processors on a load should be better than materializing. 3613 operand immP_load() %{ 3614 predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3))); 3615 match(ConP); 3616 3617 op_cost(5); 3618 // formats are generated automatically for constants and base registers 3619 format %{ %} 3620 interface(CONST_INTER); 3621 %} 3622 3623 // Pointer Immediate: 64-bit 3624 operand immP_no_oop_cheap() %{ 3625 predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3)); 3626 match(ConP); 3627 3628 op_cost(5); 3629 // formats are generated automatically for constants and base registers 3630 format %{ %} 3631 interface(CONST_INTER); 3632 %} 3633 #endif 3634 3635 operand immP13() %{ 3636 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095)); 3637 match(ConP); 3638 op_cost(0); 3639 3640 format %{ %} 3641 interface(CONST_INTER); 3642 %} 3643 3644 operand immP0() %{ 3645 predicate(n->get_ptr() == 0); 3646 match(ConP); 3647 op_cost(0); 3648 3649 format %{ %} 3650 interface(CONST_INTER); 3651 %} 3652 3653 operand immP_poll() %{ 3654 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page()); 3655 match(ConP); 3656 3657 // formats are generated automatically for constants and base registers 3658 format %{ %} 3659 interface(CONST_INTER); 3660 %} 3661 3662 // Pointer Immediate 3663 operand immN() 3664 %{ 3665 match(ConN); 3666 3667 op_cost(10); 3668 format %{ %} 3669 interface(CONST_INTER); 3670 %} 3671 3672 operand immNKlass() 3673 %{ 3674 match(ConNKlass); 3675 3676 op_cost(10); 3677 format %{ %} 3678 interface(CONST_INTER); 3679 %} 3680 3681 // NULL Pointer Immediate 3682 operand immN0() 3683 %{ 3684 predicate(n->get_narrowcon() == 0); 3685 match(ConN); 3686 3687 op_cost(0); 3688 format %{ %} 3689 
interface(CONST_INTER); 3690 %} 3691 3692 operand immL() %{ 3693 match(ConL); 3694 op_cost(40); 3695 // formats are generated automatically for constants and base registers 3696 format %{ %} 3697 interface(CONST_INTER); 3698 %} 3699 3700 operand immL0() %{ 3701 predicate(n->get_long() == 0L); 3702 match(ConL); 3703 op_cost(0); 3704 // formats are generated automatically for constants and base registers 3705 format %{ %} 3706 interface(CONST_INTER); 3707 %} 3708 3709 // Integer Immediate: 5-bit 3710 operand immL5() %{ 3711 predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long())); 3712 match(ConL); 3713 op_cost(0); 3714 format %{ %} 3715 interface(CONST_INTER); 3716 %} 3717 3718 // Long Immediate: 13-bit 3719 operand immL13() %{ 3720 predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L)); 3721 match(ConL); 3722 op_cost(0); 3723 3724 format %{ %} 3725 interface(CONST_INTER); 3726 %} 3727 3728 // Long Immediate: 13-bit minus 7 3729 operand immL13m7() %{ 3730 predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L)); 3731 match(ConL); 3732 op_cost(0); 3733 3734 format %{ %} 3735 interface(CONST_INTER); 3736 %} 3737 3738 // Long Immediate: low 32-bit mask 3739 operand immL_32bits() %{ 3740 predicate(n->get_long() == 0xFFFFFFFFL); 3741 match(ConL); 3742 op_cost(0); 3743 3744 format %{ %} 3745 interface(CONST_INTER); 3746 %} 3747 3748 // Long Immediate: cheap (materialize in <= 3 instructions) 3749 operand immL_cheap() %{ 3750 predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3); 3751 match(ConL); 3752 op_cost(0); 3753 3754 format %{ %} 3755 interface(CONST_INTER); 3756 %} 3757 3758 // Long Immediate: expensive (materialize in > 3 instructions) 3759 operand immL_expensive() %{ 3760 predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3); 3761 match(ConL); 3762 op_cost(0); 3763 3764 format %{ %} 3765 interface(CONST_INTER); 3766 %} 3767 
3768 // Double Immediate 3769 operand immD() %{ 3770 match(ConD); 3771 3772 op_cost(40); 3773 format %{ %} 3774 interface(CONST_INTER); 3775 %} 3776 3777 // Double Immediate: +0.0d 3778 operand immD0() %{ 3779 predicate(jlong_cast(n->getd()) == 0); 3780 match(ConD); 3781 3782 op_cost(0); 3783 format %{ %} 3784 interface(CONST_INTER); 3785 %} 3786 3787 // Float Immediate 3788 operand immF() %{ 3789 match(ConF); 3790 3791 op_cost(20); 3792 format %{ %} 3793 interface(CONST_INTER); 3794 %} 3795 3796 // Float Immediate: +0.0f 3797 operand immF0() %{ 3798 predicate(jint_cast(n->getf()) == 0); 3799 match(ConF); 3800 3801 op_cost(0); 3802 format %{ %} 3803 interface(CONST_INTER); 3804 %} 3805 3806 // Integer Register Operands 3807 // Integer Register 3808 operand iRegI() %{ 3809 constraint(ALLOC_IN_RC(int_reg)); 3810 match(RegI); 3811 3812 match(notemp_iRegI); 3813 match(g1RegI); 3814 match(o0RegI); 3815 match(iRegIsafe); 3816 3817 format %{ %} 3818 interface(REG_INTER); 3819 %} 3820 3821 operand notemp_iRegI() %{ 3822 constraint(ALLOC_IN_RC(notemp_int_reg)); 3823 match(RegI); 3824 3825 match(o0RegI); 3826 3827 format %{ %} 3828 interface(REG_INTER); 3829 %} 3830 3831 operand o0RegI() %{ 3832 constraint(ALLOC_IN_RC(o0_regI)); 3833 match(iRegI); 3834 3835 format %{ %} 3836 interface(REG_INTER); 3837 %} 3838 3839 // Pointer Register 3840 operand iRegP() %{ 3841 constraint(ALLOC_IN_RC(ptr_reg)); 3842 match(RegP); 3843 3844 match(lock_ptr_RegP); 3845 match(g1RegP); 3846 match(g2RegP); 3847 match(g3RegP); 3848 match(g4RegP); 3849 match(i0RegP); 3850 match(o0RegP); 3851 match(o1RegP); 3852 match(l7RegP); 3853 3854 format %{ %} 3855 interface(REG_INTER); 3856 %} 3857 3858 operand sp_ptr_RegP() %{ 3859 constraint(ALLOC_IN_RC(sp_ptr_reg)); 3860 match(RegP); 3861 match(iRegP); 3862 3863 format %{ %} 3864 interface(REG_INTER); 3865 %} 3866 3867 operand lock_ptr_RegP() %{ 3868 constraint(ALLOC_IN_RC(lock_ptr_reg)); 3869 match(RegP); 3870 match(i0RegP); 3871 match(o0RegP); 3872 
match(o1RegP); 3873 match(l7RegP); 3874 3875 format %{ %} 3876 interface(REG_INTER); 3877 %} 3878 3879 operand g1RegP() %{ 3880 constraint(ALLOC_IN_RC(g1_regP)); 3881 match(iRegP); 3882 3883 format %{ %} 3884 interface(REG_INTER); 3885 %} 3886 3887 operand g2RegP() %{ 3888 constraint(ALLOC_IN_RC(g2_regP)); 3889 match(iRegP); 3890 3891 format %{ %} 3892 interface(REG_INTER); 3893 %} 3894 3895 operand g3RegP() %{ 3896 constraint(ALLOC_IN_RC(g3_regP)); 3897 match(iRegP); 3898 3899 format %{ %} 3900 interface(REG_INTER); 3901 %} 3902 3903 operand g1RegI() %{ 3904 constraint(ALLOC_IN_RC(g1_regI)); 3905 match(iRegI); 3906 3907 format %{ %} 3908 interface(REG_INTER); 3909 %} 3910 3911 operand g3RegI() %{ 3912 constraint(ALLOC_IN_RC(g3_regI)); 3913 match(iRegI); 3914 3915 format %{ %} 3916 interface(REG_INTER); 3917 %} 3918 3919 operand g4RegI() %{ 3920 constraint(ALLOC_IN_RC(g4_regI)); 3921 match(iRegI); 3922 3923 format %{ %} 3924 interface(REG_INTER); 3925 %} 3926 3927 operand g4RegP() %{ 3928 constraint(ALLOC_IN_RC(g4_regP)); 3929 match(iRegP); 3930 3931 format %{ %} 3932 interface(REG_INTER); 3933 %} 3934 3935 operand i0RegP() %{ 3936 constraint(ALLOC_IN_RC(i0_regP)); 3937 match(iRegP); 3938 3939 format %{ %} 3940 interface(REG_INTER); 3941 %} 3942 3943 operand o0RegP() %{ 3944 constraint(ALLOC_IN_RC(o0_regP)); 3945 match(iRegP); 3946 3947 format %{ %} 3948 interface(REG_INTER); 3949 %} 3950 3951 operand o1RegP() %{ 3952 constraint(ALLOC_IN_RC(o1_regP)); 3953 match(iRegP); 3954 3955 format %{ %} 3956 interface(REG_INTER); 3957 %} 3958 3959 operand o2RegP() %{ 3960 constraint(ALLOC_IN_RC(o2_regP)); 3961 match(iRegP); 3962 3963 format %{ %} 3964 interface(REG_INTER); 3965 %} 3966 3967 operand o7RegP() %{ 3968 constraint(ALLOC_IN_RC(o7_regP)); 3969 match(iRegP); 3970 3971 format %{ %} 3972 interface(REG_INTER); 3973 %} 3974 3975 operand l7RegP() %{ 3976 constraint(ALLOC_IN_RC(l7_regP)); 3977 match(iRegP); 3978 3979 format %{ %} 3980 interface(REG_INTER); 3981 %} 3982 
// Integer register constrained to O7 only.
operand o7RegI() %{
  constraint(ALLOC_IN_RC(o7_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Narrow-oop (compressed pointer) register; allocated from the int class.
operand iRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// Long Register
operand iRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

// Long register constrained to O2 only.
operand o2RegL() %{
  constraint(ALLOC_IN_RC(o2_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Long register constrained to O7 only.
operand o7RegL() %{
  constraint(ALLOC_IN_RC(o7_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Long register constrained to G1 only.
operand g1RegL() %{
  constraint(ALLOC_IN_RC(g1_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Long register constrained to G3 only.
operand g3RegL() %{
  constraint(ALLOC_IN_RC(g3_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Int Register safe
// This is 64bit safe: an int value allocated from the long register class.
operand iRegIsafe() %{
  constraint(ALLOC_IN_RC(long_reg));

  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Condition Code Flag Register
operand flagsReg() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "ccr" %} // both ICC and XCC
  interface(REG_INTER);
%}

// Condition Code Register, unsigned comparisons.
operand flagsRegU() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "icc_U" %}
  interface(REG_INTER);
%}

// Condition Code Register, pointer comparisons.
4070 operand flagsRegP() %{ 4071 constraint(ALLOC_IN_RC(int_flags)); 4072 match(RegFlags); 4073 4074 #ifdef _LP64 4075 format %{ "xcc_P" %} 4076 #else 4077 format %{ "icc_P" %} 4078 #endif 4079 interface(REG_INTER); 4080 %} 4081 4082 // Condition Code Register, long comparisons. 4083 operand flagsRegL() %{ 4084 constraint(ALLOC_IN_RC(int_flags)); 4085 match(RegFlags); 4086 4087 format %{ "xcc_L" %} 4088 interface(REG_INTER); 4089 %} 4090 4091 // Condition Code Register, floating comparisons, unordered same as "less". 4092 operand flagsRegF() %{ 4093 constraint(ALLOC_IN_RC(float_flags)); 4094 match(RegFlags); 4095 match(flagsRegF0); 4096 4097 format %{ %} 4098 interface(REG_INTER); 4099 %} 4100 4101 operand flagsRegF0() %{ 4102 constraint(ALLOC_IN_RC(float_flag0)); 4103 match(RegFlags); 4104 4105 format %{ %} 4106 interface(REG_INTER); 4107 %} 4108 4109 4110 // Condition Code Flag Register used by long compare 4111 operand flagsReg_long_LTGE() %{ 4112 constraint(ALLOC_IN_RC(int_flags)); 4113 match(RegFlags); 4114 format %{ "icc_LTGE" %} 4115 interface(REG_INTER); 4116 %} 4117 operand flagsReg_long_EQNE() %{ 4118 constraint(ALLOC_IN_RC(int_flags)); 4119 match(RegFlags); 4120 format %{ "icc_EQNE" %} 4121 interface(REG_INTER); 4122 %} 4123 operand flagsReg_long_LEGT() %{ 4124 constraint(ALLOC_IN_RC(int_flags)); 4125 match(RegFlags); 4126 format %{ "icc_LEGT" %} 4127 interface(REG_INTER); 4128 %} 4129 4130 4131 operand regD() %{ 4132 constraint(ALLOC_IN_RC(dflt_reg)); 4133 match(RegD); 4134 4135 match(regD_low); 4136 4137 format %{ %} 4138 interface(REG_INTER); 4139 %} 4140 4141 operand regF() %{ 4142 constraint(ALLOC_IN_RC(sflt_reg)); 4143 match(RegF); 4144 4145 format %{ %} 4146 interface(REG_INTER); 4147 %} 4148 4149 operand regD_low() %{ 4150 constraint(ALLOC_IN_RC(dflt_low_reg)); 4151 match(regD); 4152 4153 format %{ %} 4154 interface(REG_INTER); 4155 %} 4156 4157 // Special Registers 4158 4159 // Method Register 4160 operand inline_cache_regP(iRegP reg) %{ 4161 
constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1 4162 match(reg); 4163 format %{ %} 4164 interface(REG_INTER); 4165 %} 4166 4167 operand interpreter_method_oop_regP(iRegP reg) %{ 4168 constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1 4169 match(reg); 4170 format %{ %} 4171 interface(REG_INTER); 4172 %} 4173 4174 4175 //----------Complex Operands--------------------------------------------------- 4176 // Indirect Memory Reference 4177 operand indirect(sp_ptr_RegP reg) %{ 4178 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4179 match(reg); 4180 4181 op_cost(100); 4182 format %{ "[$reg]" %} 4183 interface(MEMORY_INTER) %{ 4184 base($reg); 4185 index(0x0); 4186 scale(0x0); 4187 disp(0x0); 4188 %} 4189 %} 4190 4191 // Indirect with simm13 Offset 4192 operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{ 4193 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4194 match(AddP reg offset); 4195 4196 op_cost(100); 4197 format %{ "[$reg + $offset]" %} 4198 interface(MEMORY_INTER) %{ 4199 base($reg); 4200 index(0x0); 4201 scale(0x0); 4202 disp($offset); 4203 %} 4204 %} 4205 4206 // Indirect with simm13 Offset minus 7 4207 operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{ 4208 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4209 match(AddP reg offset); 4210 4211 op_cost(100); 4212 format %{ "[$reg + $offset]" %} 4213 interface(MEMORY_INTER) %{ 4214 base($reg); 4215 index(0x0); 4216 scale(0x0); 4217 disp($offset); 4218 %} 4219 %} 4220 4221 // Note: Intel has a swapped version also, like this: 4222 //operand indOffsetX(iRegI reg, immP offset) %{ 4223 // constraint(ALLOC_IN_RC(int_reg)); 4224 // match(AddP offset reg); 4225 // 4226 // op_cost(100); 4227 // format %{ "[$reg + $offset]" %} 4228 // interface(MEMORY_INTER) %{ 4229 // base($reg); 4230 // index(0x0); 4231 // scale(0x0); 4232 // disp($offset); 4233 // %} 4234 //%} 4235 //// However, it doesn't make sense for SPARC, since 4236 // we have no particularly good way 
to embed oops in
// single instructions.

// Indirect with Register Index
operand indIndex(iRegP addr, iRegX index) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr index);

  op_cost(100);
  format %{ "[$addr + $index]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}

//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All stack-slot operands address [R_SP + disp]: base is hard-wired to
// encoding 0xE (the stack pointer) and disp is the allocator-assigned
// stack offset of the spill slot.
operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Op, signed integer.
// The hex values in each COND_INTER are the condition-field encodings
// emitted into the branch instruction for that relation.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x3);
    greater_equal(0xB);
    less_equal(0x2);
    greater(0xA);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, unsigned
operand cmpOpU() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "u" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, pointer (same as unsigned)
operand cmpOpP() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "p" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, branch-register encoding
operand cmpOp_reg() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal        (0x1);
    not_equal    (0x5);
    less         (0x3);
    greater_equal(0x7);
    less_equal   (0x2);
    greater      (0x6);
    overflow(0x7);    // not supported
    no_overflow(0xF); // not supported
  %}
%}

// Comparison Code, floating, unordered same as less
operand cmpOpF() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "fl" %}
  interface(COND_INTER) %{
    equal(0x9);
    not_equal(0x1);
    less(0x3);
    greater_equal(0xB);
    less_equal(0xE);
    greater(0x6);

    overflow(0x7);    // not supported
    no_overflow(0xF); // not supported
  %}
%}

// Used by long compare
// Encodings are the signed relations with operand order swapped
// (e.g. less here encodes "greater" of the commuted compare).
operand cmpOp_commute() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0xA);
    greater_equal(0x2);
    less_equal(0xB);
    greater(0x3);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format.  The classic
// case of this is memory operands.
opclass memory( indirect, indOffset13, indIndex );
opclass indIndexMemory( indIndex );

//----------PIPELINE-----------------------------------------------------------
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;           // Fixed size instructions
  branch_has_delay_slot;             // Branch has delay slot following
  max_instructions_per_bundle = 4;   // Up to 4 instructions per bundle
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline
pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg long operation
pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
    instruction_count(2);
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
    IALU  : R;
%}

// Integer ALU reg-reg long dependent operation
pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    cr    : E(write);
    IALU  : R(2);
%}

// Integer ALU reg-imm operation
pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code
pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
    single_instruction;
    dst   : E(write);
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm operation with condition code
pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
    single_instruction;
    dst   : E(write);
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation
pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU zero-reg operation with condition code only
pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
    single_instruction;
    cr    : E(write);
    src   : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code only
pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm operation with condition code only
pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg-zero operation with condition code only
pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm-zero operation with condition code only
pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{
    single_instruction;
    cr    : E(write);
    src1  : R(read);
    IALU  : R;
%}

// Integer ALU reg-reg operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
    single_instruction;
    cr    : E(write);
    src1  : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU reg-imm operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
    single_instruction;
    cr    : E(write);
    src1  : E(write);
    src1  : R(read);
    IALU  : R;
%}

pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
    multiple_bundles;
    dst   : E(write)+4;
    cr    : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R(3);
    BR    : R(2);
%}

// Integer ALU operation
pipe_class ialu_none(iRegI dst) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}

// Integer ALU reg operation
pipe_class ialu_reg(iRegI dst, iRegI src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}

// Integer ALU reg conditional operation
// This instruction has a 1 cycle stall, and cannot execute
// in the same cycle as the instruction setting the condition
// code. We kludge this by pretending to read the condition code
// 1 cycle earlier, and by marking the functional units as busy
// for 2 cycles with the result available 1 cycle later than
// is really the case.
pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
    single_instruction;
    op2_out : C(write);
    op1     : R(read);
    cr      : R(read);       // This is really E, with a 1 cycle stall
    BR      : R(2);
    MS      : R(2);
%}

#ifdef _LP64
pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
    instruction_count(1); multiple_bundles;
    dst     : C(write)+1;
    src     : R(read)+1;
    IALU    : R(1);
    BR      : E(2);
    MS      : E(2);
%}
#endif

// Integer ALU reg operation
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}
pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
    single_instruction; may_have_no_code;
    dst   : E(write);
    src   : R(read);
    IALU  : R;
%}

// Two integer ALU reg operations
pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
    instruction_count(2);
    dst   : E(write);
    src   : R(read);
    A0    : R;
    A1    : R;
%}

// Two integer ALU reg operations
pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
    instruction_count(2); may_have_no_code;
    dst   : E(write);
    src   : R(read);
    A0    : R;
    A1    : R;
%}

// Integer ALU imm operation
pipe_class ialu_imm(iRegI dst, immI13 src) %{
    single_instruction;
    dst   : E(write);
    IALU  : R;
%}

// Integer ALU reg-reg with carry operation
pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    IALU  : R;
%}

// Integer ALU cc operation
pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
    single_instruction;
    dst   : E(write);
    cc    : R(read);
    IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    src   : R(read);
    IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    p     : R(read);
    q     : R(read);
    IALU  : R;
%}

// Integer ALU hi-lo-reg operation
pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
%}

// Float ALU hi-lo-reg operation (with temp)
pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
%}

// Long Constant
pipe_class loadConL( iRegL dst, immL src ) %{
    instruction_count(2); multiple_bundles;
    dst   : E(write)+1;
    IALU  : R(2);
    IALU  : R(2);
%}

// Pointer Constant
pipe_class loadConP( iRegP dst, immP src ) %{
    instruction_count(0); multiple_bundles;
    fixed_latency(6);
%}

// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
#ifdef _LP64
    instruction_count(0); multiple_bundles;
    fixed_latency(6);
#else
    dst   : E(write);
    IALU  : R;
#endif
%}

// Long Constant small
pipe_class loadConLlo( iRegL dst, immL src ) %{
    instruction_count(2);
    dst   : E(write);
    IALU  : R;
    IALU  : R;
%}

// [PHH] This is wrong for 64-bit.  See LdImmF/D.
pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
    instruction_count(1); multiple_bundles;
    src   : R(read);
    dst   : M(write)+1;
    IALU  : R;
    MS    : E;
%}

// Integer ALU nop operation
pipe_class ialu_nop() %{
    single_instruction;
    IALU  : R;
%}

// Integer ALU nop operation
pipe_class ialu_nop_A0() %{
    single_instruction;
    A0    : R;
%}

// Integer ALU nop operation
pipe_class ialu_nop_A1() %{
    single_instruction;
    A1    : R;
%}

// Integer Multiply reg-reg operation
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    src2  : R(read);
    MS    : R(5);
%}

// Integer Multiply reg-imm operation
pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
    single_instruction;
    dst   : E(write);
    src1  : R(read);
    MS    : R(5);
%}

pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    single_instruction;
    dst   : E(write)+4;
    src1  : R(read);
    src2  : R(read);
    MS    : R(6);
%}

pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
    single_instruction;
    dst   : E(write)+4;
    src1  : R(read);
    MS    : R(6);
%}

// Integer Divide reg-reg
pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    temp  : E(write);
    src1  : R(read);
    src2  : R(read);
    temp  : R(read);
    MS    : R(38);
%}

// Integer Divide reg-imm
pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
    instruction_count(1); multiple_bundles;
    dst   : E(write);
    temp  : E(write);
    src1  : R(read);
    temp  : R(read);
    MS    : R(38);
%}

// Long Divide
pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    src2 : R(read)+1;
    MS   : R(70);
%}

pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
    dst  : E(write)+71;
    src1 : R(read);
    MS   : R(70);
%}

// Floating Point Add Float
pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Add Double
pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FA    : R;
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    cr    : R(read);
    FA    : R(2);
    BR    : R(2);
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
    single_instruction;
    dst   : X(write);
    src   : E(read);
    cr    : R(read);
    FA    : R(2);
    BR    : R(2);
%}

// Floating Point Multiply Float
pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
%}

// Floating Point Multiply Double
pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
%}

// Floating Point Divide Float
pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
    single_instruction;
    dst   : X(write);
    src1  : E(read);
    src2  : E(read);
    FM    : R;
    FDIV  : C(14);
%}
4945 4946 // Floating Point Divide Double 4947 pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{ 4948 single_instruction; 4949 dst : X(write); 4950 src1 : E(read); 4951 src2 : E(read); 4952 FM : R; 4953 FDIV : C(17); 4954 %} 4955 4956 // Floating Point Move/Negate/Abs Float 4957 pipe_class faddF_reg(regF dst, regF src) %{ 4958 single_instruction; 4959 dst : W(write); 4960 src : E(read); 4961 FA : R(1); 4962 %} 4963 4964 // Floating Point Move/Negate/Abs Double 4965 pipe_class faddD_reg(regD dst, regD src) %{ 4966 single_instruction; 4967 dst : W(write); 4968 src : E(read); 4969 FA : R; 4970 %} 4971 4972 // Floating Point Convert F->D 4973 pipe_class fcvtF2D(regD dst, regF src) %{ 4974 single_instruction; 4975 dst : X(write); 4976 src : E(read); 4977 FA : R; 4978 %} 4979 4980 // Floating Point Convert I->D 4981 pipe_class fcvtI2D(regD dst, regF src) %{ 4982 single_instruction; 4983 dst : X(write); 4984 src : E(read); 4985 FA : R; 4986 %} 4987 4988 // Floating Point Convert LHi->D 4989 pipe_class fcvtLHi2D(regD dst, regD src) %{ 4990 single_instruction; 4991 dst : X(write); 4992 src : E(read); 4993 FA : R; 4994 %} 4995 4996 // Floating Point Convert L->D 4997 pipe_class fcvtL2D(regD dst, regF src) %{ 4998 single_instruction; 4999 dst : X(write); 5000 src : E(read); 5001 FA : R; 5002 %} 5003 5004 // Floating Point Convert L->F 5005 pipe_class fcvtL2F(regD dst, regF src) %{ 5006 single_instruction; 5007 dst : X(write); 5008 src : E(read); 5009 FA : R; 5010 %} 5011 5012 // Floating Point Convert D->F 5013 pipe_class fcvtD2F(regD dst, regF src) %{ 5014 single_instruction; 5015 dst : X(write); 5016 src : E(read); 5017 FA : R; 5018 %} 5019 5020 // Floating Point Convert I->L 5021 pipe_class fcvtI2L(regD dst, regF src) %{ 5022 single_instruction; 5023 dst : X(write); 5024 src : E(read); 5025 FA : R; 5026 %} 5027 5028 // Floating Point Convert D->F 5029 pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{ 5030 instruction_count(1); multiple_bundles; 5031 dst : 
X(write)+6; 5032 src : E(read); 5033 FA : R; 5034 %} 5035 5036 // Floating Point Convert D->L 5037 pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{ 5038 instruction_count(1); multiple_bundles; 5039 dst : X(write)+6; 5040 src : E(read); 5041 FA : R; 5042 %} 5043 5044 // Floating Point Convert F->I 5045 pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{ 5046 instruction_count(1); multiple_bundles; 5047 dst : X(write)+6; 5048 src : E(read); 5049 FA : R; 5050 %} 5051 5052 // Floating Point Convert F->L 5053 pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{ 5054 instruction_count(1); multiple_bundles; 5055 dst : X(write)+6; 5056 src : E(read); 5057 FA : R; 5058 %} 5059 5060 // Floating Point Convert I->F 5061 pipe_class fcvtI2F(regF dst, regF src) %{ 5062 single_instruction; 5063 dst : X(write); 5064 src : E(read); 5065 FA : R; 5066 %} 5067 5068 // Floating Point Compare 5069 pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{ 5070 single_instruction; 5071 cr : X(write); 5072 src1 : E(read); 5073 src2 : E(read); 5074 FA : R; 5075 %} 5076 5077 // Floating Point Compare 5078 pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{ 5079 single_instruction; 5080 cr : X(write); 5081 src1 : E(read); 5082 src2 : E(read); 5083 FA : R; 5084 %} 5085 5086 // Floating Add Nop 5087 pipe_class fadd_nop() %{ 5088 single_instruction; 5089 FA : R; 5090 %} 5091 5092 // Integer Store to Memory 5093 pipe_class istore_mem_reg(memory mem, iRegI src) %{ 5094 single_instruction; 5095 mem : R(read); 5096 src : C(read); 5097 MS : R; 5098 %} 5099 5100 // Integer Store to Memory 5101 pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{ 5102 single_instruction; 5103 mem : R(read); 5104 src : C(read); 5105 MS : R; 5106 %} 5107 5108 // Integer Store Zero to Memory 5109 pipe_class istore_mem_zero(memory mem, immI0 src) %{ 5110 single_instruction; 5111 mem : R(read); 5112 MS : R; 5113 %} 5114 5115 // Special Stack Slot 
Store 5116 pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{ 5117 single_instruction; 5118 stkSlot : R(read); 5119 src : C(read); 5120 MS : R; 5121 %} 5122 5123 // Special Stack Slot Store 5124 pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{ 5125 instruction_count(2); multiple_bundles; 5126 stkSlot : R(read); 5127 src : C(read); 5128 MS : R(2); 5129 %} 5130 5131 // Float Store 5132 pipe_class fstoreF_mem_reg(memory mem, RegF src) %{ 5133 single_instruction; 5134 mem : R(read); 5135 src : C(read); 5136 MS : R; 5137 %} 5138 5139 // Float Store 5140 pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{ 5141 single_instruction; 5142 mem : R(read); 5143 MS : R; 5144 %} 5145 5146 // Double Store 5147 pipe_class fstoreD_mem_reg(memory mem, RegD src) %{ 5148 instruction_count(1); 5149 mem : R(read); 5150 src : C(read); 5151 MS : R; 5152 %} 5153 5154 // Double Store 5155 pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{ 5156 single_instruction; 5157 mem : R(read); 5158 MS : R; 5159 %} 5160 5161 // Special Stack Slot Float Store 5162 pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{ 5163 single_instruction; 5164 stkSlot : R(read); 5165 src : C(read); 5166 MS : R; 5167 %} 5168 5169 // Special Stack Slot Double Store 5170 pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{ 5171 single_instruction; 5172 stkSlot : R(read); 5173 src : C(read); 5174 MS : R; 5175 %} 5176 5177 // Integer Load (when sign bit propagation not needed) 5178 pipe_class iload_mem(iRegI dst, memory mem) %{ 5179 single_instruction; 5180 mem : R(read); 5181 dst : C(write); 5182 MS : R; 5183 %} 5184 5185 // Integer Load from stack operand 5186 pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{ 5187 single_instruction; 5188 mem : R(read); 5189 dst : C(write); 5190 MS : R; 5191 %} 5192 5193 // Integer Load (when sign bit propagation or masking is needed) 5194 pipe_class iload_mask_mem(iRegI dst, memory mem) %{ 5195 single_instruction; 5196 mem : R(read); 5197 dst : 
M(write); 5198 MS : R; 5199 %} 5200 5201 // Float Load 5202 pipe_class floadF_mem(regF dst, memory mem) %{ 5203 single_instruction; 5204 mem : R(read); 5205 dst : M(write); 5206 MS : R; 5207 %} 5208 5209 // Float Load 5210 pipe_class floadD_mem(regD dst, memory mem) %{ 5211 instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case 5212 mem : R(read); 5213 dst : M(write); 5214 MS : R; 5215 %} 5216 5217 // Float Load 5218 pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{ 5219 single_instruction; 5220 stkSlot : R(read); 5221 dst : M(write); 5222 MS : R; 5223 %} 5224 5225 // Float Load 5226 pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{ 5227 single_instruction; 5228 stkSlot : R(read); 5229 dst : M(write); 5230 MS : R; 5231 %} 5232 5233 // Memory Nop 5234 pipe_class mem_nop() %{ 5235 single_instruction; 5236 MS : R; 5237 %} 5238 5239 pipe_class sethi(iRegP dst, immI src) %{ 5240 single_instruction; 5241 dst : E(write); 5242 IALU : R; 5243 %} 5244 5245 pipe_class loadPollP(iRegP poll) %{ 5246 single_instruction; 5247 poll : R(read); 5248 MS : R; 5249 %} 5250 5251 pipe_class br(Universe br, label labl) %{ 5252 single_instruction_with_delay_slot; 5253 BR : R; 5254 %} 5255 5256 pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{ 5257 single_instruction_with_delay_slot; 5258 cr : E(read); 5259 BR : R; 5260 %} 5261 5262 pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{ 5263 single_instruction_with_delay_slot; 5264 op1 : E(read); 5265 BR : R; 5266 MS : R; 5267 %} 5268 5269 // Compare and branch 5270 pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{ 5271 instruction_count(2); has_delay_slot; 5272 cr : E(write); 5273 src1 : R(read); 5274 src2 : R(read); 5275 IALU : R; 5276 BR : R; 5277 %} 5278 5279 // Compare and branch 5280 pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{ 5281 
instruction_count(2); has_delay_slot; 5282 cr : E(write); 5283 src1 : R(read); 5284 IALU : R; 5285 BR : R; 5286 %} 5287 5288 // Compare and branch using cbcond 5289 pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{ 5290 single_instruction; 5291 src1 : E(read); 5292 src2 : E(read); 5293 IALU : R; 5294 BR : R; 5295 %} 5296 5297 // Compare and branch using cbcond 5298 pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{ 5299 single_instruction; 5300 src1 : E(read); 5301 IALU : R; 5302 BR : R; 5303 %} 5304 5305 pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{ 5306 single_instruction_with_delay_slot; 5307 cr : E(read); 5308 BR : R; 5309 %} 5310 5311 pipe_class br_nop() %{ 5312 single_instruction; 5313 BR : R; 5314 %} 5315 5316 pipe_class simple_call(method meth) %{ 5317 instruction_count(2); multiple_bundles; force_serialization; 5318 fixed_latency(100); 5319 BR : R(1); 5320 MS : R(1); 5321 A0 : R(1); 5322 %} 5323 5324 pipe_class compiled_call(method meth) %{ 5325 instruction_count(1); multiple_bundles; force_serialization; 5326 fixed_latency(100); 5327 MS : R(1); 5328 %} 5329 5330 pipe_class call(method meth) %{ 5331 instruction_count(0); multiple_bundles; force_serialization; 5332 fixed_latency(100); 5333 %} 5334 5335 pipe_class tail_call(Universe ignore, label labl) %{ 5336 single_instruction; has_delay_slot; 5337 fixed_latency(100); 5338 BR : R(1); 5339 MS : R(1); 5340 %} 5341 5342 pipe_class ret(Universe ignore) %{ 5343 single_instruction; has_delay_slot; 5344 BR : R(1); 5345 MS : R(1); 5346 %} 5347 5348 pipe_class ret_poll(g3RegP poll) %{ 5349 instruction_count(3); has_delay_slot; 5350 poll : E(read); 5351 MS : R; 5352 %} 5353 5354 // The real do-nothing guy 5355 pipe_class empty( ) %{ 5356 instruction_count(0); 5357 %} 5358 5359 pipe_class long_memory_op() %{ 5360 instruction_count(0); multiple_bundles; force_serialization; 5361 fixed_latency(25); 5362 MS : R(1); 5363 
%} 5364 5365 // Check-cast 5366 pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{ 5367 array : R(read); 5368 match : R(read); 5369 IALU : R(2); 5370 BR : R(2); 5371 MS : R; 5372 %} 5373 5374 // Convert FPU flags into +1,0,-1 5375 pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{ 5376 src1 : E(read); 5377 src2 : E(read); 5378 dst : E(write); 5379 FA : R; 5380 MS : R(2); 5381 BR : R(2); 5382 %} 5383 5384 // Compare for p < q, and conditionally add y 5385 pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{ 5386 p : E(read); 5387 q : E(read); 5388 y : E(read); 5389 IALU : R(3) 5390 %} 5391 5392 // Perform a compare, then move conditionally in a branch delay slot. 5393 pipe_class min_max( iRegI src2, iRegI srcdst ) %{ 5394 src2 : E(read); 5395 srcdst : E(read); 5396 IALU : R; 5397 BR : R; 5398 %} 5399 5400 // Define the class for the Nop node 5401 define %{ 5402 MachNop = ialu_nop; 5403 %} 5404 5405 %} 5406 5407 //----------INSTRUCTIONS------------------------------------------------------- 5408 5409 //------------Special Stack Slot instructions - no match rules----------------- 5410 instruct stkI_to_regF(regF dst, stackSlotI src) %{ 5411 // No match rule to avoid chain rule match. 5412 effect(DEF dst, USE src); 5413 ins_cost(MEMORY_REF_COST); 5414 size(4); 5415 format %{ "LDF $src,$dst\t! stkI to regF" %} 5416 opcode(Assembler::ldf_op3); 5417 ins_encode(simple_form3_mem_reg(src, dst)); 5418 ins_pipe(floadF_stk); 5419 %} 5420 5421 instruct stkL_to_regD(regD dst, stackSlotL src) %{ 5422 // No match rule to avoid chain rule match. 5423 effect(DEF dst, USE src); 5424 ins_cost(MEMORY_REF_COST); 5425 size(4); 5426 format %{ "LDDF $src,$dst\t! stkL to regD" %} 5427 opcode(Assembler::lddf_op3); 5428 ins_encode(simple_form3_mem_reg(src, dst)); 5429 ins_pipe(floadD_stk); 5430 %} 5431 5432 instruct regF_to_stkI(stackSlotI dst, regF src) %{ 5433 // No match rule to avoid chain rule match. 
effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STF $src,$dst\t! regF to stkI" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

instruct regD_to_stkL(stackSlotL dst, regD src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF $src,$dst\t! regD to stkL" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreD_stk_reg);
%}

instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST*2);
  size(8);
  // Two stores: int into the high word, zero into the low word of the slot.
  format %{ "STW $src,$dst.hi\t! long\n\t"
            "STW R_G0,$dst.lo" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
  ins_pipe(lstoreI_stk_reg);
%}

instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$dst\t! regL to stkD" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_stk_reg);
%}

//---------- Chain stack slots between similar types --------

// Load integer from stack slot
instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW $src,$dst\t!stk" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store integer to stack slot
instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$dst\t!stk" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

// Load long from stack slot
instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX $src,$dst\t! long" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store long to stack slot
instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$dst\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

#ifdef _LP64
// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX $src,$dst\t!ptr" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$dst\t!ptr" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#else // _LP64
// Load pointer from stack slot, 32-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDUW $src,$dst\t!ptr" %}
  opcode(Assembler::lduw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STW $src,$dst\t!ptr" %}
  opcode(Assembler::stw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#endif // _LP64

//------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit
instruct Nop_A0() %{
  ins_cost(0);

  format %{ "NOP ! 
Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A0);
%}

// Nop using the A1 functional unit
instruct Nop_A1( ) %{
  ins_cost(0);

  format %{ "NOP ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A1);
%}

// Nop using the memory functional unit
instruct Nop_MS( ) %{
  ins_cost(0);

  format %{ "NOP ! Memory Pipeline" %}
  ins_encode( emit_mem_nop );
  ins_pipe(mem_nop);
%}

// Nop using the floating add functional unit
instruct Nop_FA( ) %{
  ins_cost(0);

  format %{ "NOP ! Floating Add Pipeline" %}
  ins_encode( emit_fadd_nop );
  ins_pipe(fadd_nop);
%}

// Nop using the branch functional unit
instruct Nop_BR( ) %{
  ins_cost(0);

  format %{ "NOP ! Branch Pipeline" %}
  ins_encode( emit_br_nop );
  ins_pipe(br_nop);
%}

//----------Load/Store/Move Instructions---------------------------------------
//----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
instruct loadB(iRegI dst, memory mem) %{
  match(Set dst (LoadB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB $mem,$dst\t! byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Byte (8bit signed) into a Long Register
instruct loadB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB $mem,$dst\t! byte -> long" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into an int reg
instruct loadUB(iRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem,$dst\t! ubyte" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register
instruct loadUB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem,$dst\t! ubyte -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) with 32-bit mask into Long Register
instruct loadUB2L_immI(iRegL dst, memory mem, immI mask) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUB $mem,$dst\t# ubyte & 32-bit mask -> long\n\t"
            "AND $dst,right_n_bits($mask, 8),$dst" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
    __ and3($dst$$Register, $mask$$constant & right_n_bits(8), $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Short (16bit signed)
instruct loadS(iRegI dst, memory mem) %{
  match(Set dst (LoadS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSH $mem,$dst\t! short" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSB $mem+1,$dst\t! short -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16bit signed) into a Long Register
instruct loadS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSH $mem,$dst\t! short -> long" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned)
instruct loadUS(iRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH $mem,$dst\t! ushort/char" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
instruct loadUS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH $mem,$dst\t! 
ushort/char -> long" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register
instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduh($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 32-bit mask into a Long Register
instruct loadUS2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  effect(TEMP dst, TEMP tmp);
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  format %{ "LDUH $mem,$dst\t! ushort/char & 32-bit mask -> long\n\t"
            "SET right_n_bits($mask, 16),$tmp\n\t"
            "AND $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduh($mem$$Address, Rdst);
    __ set($mask$$constant & right_n_bits(16), Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer
instruct loadI(iRegI dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW $mem,$dst\t! int" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer to Byte (8 bit signed)
instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSB $mem+3,$dst\t! int -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 3);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Byte (8 bit UNsigned)
instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 3);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Short (16 bit signed)
instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDSH $mem+2,$dst\t! int -> short" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register, 2);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Short (16 bit UNsigned)
instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);

  format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register, 2);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer into a Long Register
instruct loadI2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSW $mem,$dst\t! int -> long" %}
  ins_encode %{
    __ ldsw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer with mask 0xFF into a Long Register
instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with mask 0xFFFF into a Long Register
instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUH $mem+2,$dst\t! 
int & 0xFFFF -> long" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 12-bit mask into a Long Register
instruct loadI2L_immU12(iRegL dst, memory mem, immU12 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(2*4);
  format %{ "LDUW $mem,$dst\t! int & 12-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduw($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 31-bit mask into a Long Register
instruct loadI2L_immU31(iRegL dst, memory mem, immU31 mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(TEMP dst, TEMP tmp);
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);

  format %{ "LDUW $mem,$dst\t! int & 31-bit mask -> long\n\t"
            "SET $mask,$tmp\n\t"
            "AND $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduw($mem$$Address, Rdst);
    __ set($mask$$constant, Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Integer into a Long Register
instruct loadUI2L(iRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW $mem,$dst\t! uint -> long" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - aligned
instruct loadL(iRegL dst, memory mem ) %{
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDX $mem,$dst\t! long" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - UNaligned
instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{
  match(Set dst (LoadL_unaligned mem));
  effect(KILL tmp);
  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
  size(16);
  format %{ "LDUW $mem+4,R_O7\t! misaligned long\n"
          "\tLDUW $mem ,$dst\n"
          "\tSLLX #32, $dst, $dst\n"
          "\tOR $dst, R_O7, $dst" %}
  opcode(Assembler::lduw_op3);
  ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst ));
  ins_pipe(iload_mem);
%}

// Load Range
instruct loadRange(iRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW $mem,$dst\t! range" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(iload_mem);
%}

// Load Integer into %f register (for fitos/fitod)
instruct loadI_freg(regF dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDF $mem,$dst\t! for fitos/fitod" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadF_mem);
%}

// Load Pointer
instruct loadP(iRegP dst, memory mem) %{
  match(Set dst (LoadP mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "LDUW $mem,$dst\t! ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
#else
  format %{ "LDX $mem,$dst\t! ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
#endif
  ins_pipe(iload_mem);
%}

// Load Compressed Pointer
instruct loadN(iRegN dst, memory mem) %{
  match(Set dst (LoadN mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDUW $mem,$dst\t! compressed ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Klass Pointer
instruct loadKlass(iRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "LDUW $mem,$dst\t! klass ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
#else
  format %{ "LDX $mem,$dst\t! klass ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
#endif
  ins_pipe(iload_mem);
%}

// Load narrow Klass Pointer
instruct loadNKlass(iRegN dst, memory mem) %{
  match(Set dst (LoadNKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDDF $mem,$dst" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadD_mem);
%}

// Load Double - UNaligned
instruct loadD_unaligned(regD_low dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
  size(8);
  format %{ "LDF $mem ,$dst.hi\t! misaligned double\n"
          "\tLDF $mem+4,$dst.lo\t!" 
%}
  opcode(Assembler::ldf_op3);
  ins_encode( form3_mem_reg_double_unaligned( mem, dst ));
  ins_pipe(iload_mem);
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDF $mem,$dst" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadF_mem);
%}

// Load Constant
instruct loadConI( iRegI dst, immI src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst" %}
  ins_encode( Set32(src, dst) );
  ins_pipe(ialu_hi_lo_reg);
%}

instruct loadConI13( iRegI dst, immI13 src ) %{
  match(Set dst src);

  size(4);
  format %{ "MOV $src,$dst" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

#ifndef _LP64
instruct loadConP(iRegP dst, immP con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t!ptr" %}
  ins_encode %{
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $con$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ set_oop_constant((jobject) val, $dst$$Register);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)val, $dst$$Register);
    } else { // non-oop pointers, e.g. card mark base, heap top
      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
      __ set(val, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}
#else
instruct loadConP_set(iRegP dst, immP_set con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t! ptr" %}
  ins_encode %{
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $con$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ set_oop_constant((jobject) val, $dst$$Register);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)val, $dst$$Register);
    } else { // non-oop pointers, e.g. card mark base, heap top
      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
      __ set(val, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}

instruct loadConP_load(iRegP dst, immP_load con) %{
  match(Set dst con);
  ins_cost(MEMORY_REF_COST);
  format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
    __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
  %}
  ins_pipe(loadConP);
%}

instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t! non-oop ptr" %}
  ins_encode %{
    if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register);
    } else {
      __ set($con$$constant, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}
#endif // _LP64

instruct loadConP0(iRegP dst, immP0 src) %{
  match(Set dst src);

  size(4);
  format %{ "CLR $dst\t!ptr" %}
  ins_encode %{
    __ clr($dst$$Register);
  %}
  ins_pipe(ialu_imm);
%}

instruct loadConP_poll(iRegP dst, immP_poll src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  format %{ "SET $src,$dst\t!ptr" %}
  ins_encode %{
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, reg_to_register_object($dst$$reg));
  %}
  ins_pipe(loadConP_poll);
%}

instruct loadConN0(iRegN dst, immN0 src) %{
  match(Set dst src);

  size(4);
  format %{ "CLR $dst\t! compressed NULL ptr" %}
  ins_encode %{
    __ clr($dst$$Register);
  %}
  ins_pipe(ialu_imm);
%}

instruct loadConN(iRegN dst, immN src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop((jobject)$src$$constant, dst);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

instruct loadConNKlass(iRegN dst, immNKlass src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst\t! compressed klass ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass((Klass*)$src$$constant, dst);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

// Materialize long value (predicated by immL_cheap).
instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  ins_cost(DEFAULT_COST * 3);
  format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
  ins_encode %{
    __ set64($con$$constant, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(loadConL);
%}

// Load long value from constant table (predicated by immL_expensive).
instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
  match(Set dst con);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
    __ ldx($constanttablebase, con_offset, $dst$$Register);
  %}
  ins_pipe(loadConL);
%}

instruct loadConL0( iRegL dst, immL0 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "CLR $dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

instruct loadConL13( iRegL dst, immL13 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);

  size(4);
  format %{ "MOV $src,$dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

instruct loadConF(regF dst, immF con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
  %}
  ins_pipe(loadConFD);
%}

instruct loadConD(regD dst, immD con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! 
load from constant table: double=$con" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Prefetch instructions for allocation.
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchAlloc( memory mem ) %{
  predicate(AllocatePrefetchInstr == 0);
  match( PrefetchAllocation mem );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_write( mem ) );
  ins_pipe(iload_mem);
%}

// Use BIS instruction to prefetch for allocation.
// Could fault, need space at the end of TLAB.
instruct prefetchAlloc_bis( iRegP dst ) %{
  predicate(AllocatePrefetchInstr == 1);
  match( PrefetchAllocation dst );
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
  ins_encode %{
    __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  %}
  ins_pipe(istore_mem_reg);
%}

// Next code is used for finding next cache line address to prefetch.
#ifndef _LP64
instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
  match(Set dst (CastX2P (AndI (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "AND $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
  match(Set dst (CastX2P (AndL (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "AND $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}
#endif

//----------Store Instructions-------------------------------------------------
// Store Byte
instruct storeB(memory mem, iRegI src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

instruct storeB0(memory mem, immI0 src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

instruct storeCM0(memory mem, immI0 src) %{
  match(Set mem (StoreCM mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Char/Short
instruct storeC(memory mem, iRegI src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

instruct storeC0(memory mem, immI0 src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Integer
instruct storeI(memory mem, iRegI src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store Long
instruct storeL(memory mem, iRegL src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$mem\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

instruct storeI0(memory mem, immI0 src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

instruct storeL0(memory mem, immL0 src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Integer from float register (used after fstoi)
instruct storeI_Freg(memory mem, regF src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

// Store Pointer
instruct storeP(memory dst, sp_ptr_RegP src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_spORreg);
%}

instruct storeP0(memory dst, immP0 src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store Compressed Pointer
instruct storeN(memory dst, iRegN src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

instruct storeNKlass(memory dst, iRegN src) %{
  match(Set dst (StoreNKlass dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);

  format %{ "STW $src,$dst\t! 
compressed klass ptr" %} 6560 ins_encode %{ 6561 Register base = as_Register($dst$$base); 6562 Register index = as_Register($dst$$index); 6563 Register src = $src$$Register; 6564 if (index != G0) { 6565 __ stw(src, base, index); 6566 } else { 6567 __ stw(src, base, $dst$$disp); 6568 } 6569 %} 6570 ins_pipe(istore_mem_spORreg); 6571 %} 6572 6573 instruct storeN0(memory dst, immN0 src) %{ 6574 match(Set dst (StoreN dst src)); 6575 ins_cost(MEMORY_REF_COST); 6576 size(4); 6577 6578 format %{ "STW $src,$dst\t! compressed ptr" %} 6579 ins_encode %{ 6580 Register base = as_Register($dst$$base); 6581 Register index = as_Register($dst$$index); 6582 if (index != G0) { 6583 __ stw(0, base, index); 6584 } else { 6585 __ stw(0, base, $dst$$disp); 6586 } 6587 %} 6588 ins_pipe(istore_mem_zero); 6589 %} 6590 6591 // Store Double 6592 instruct storeD( memory mem, regD src) %{ 6593 match(Set mem (StoreD mem src)); 6594 ins_cost(MEMORY_REF_COST); 6595 6596 size(4); 6597 format %{ "STDF $src,$mem" %} 6598 opcode(Assembler::stdf_op3); 6599 ins_encode(simple_form3_mem_reg( mem, src ) ); 6600 ins_pipe(fstoreD_mem_reg); 6601 %} 6602 6603 instruct storeD0( memory mem, immD0 src) %{ 6604 match(Set mem (StoreD mem src)); 6605 ins_cost(MEMORY_REF_COST); 6606 6607 size(4); 6608 format %{ "STX $src,$mem" %} 6609 opcode(Assembler::stx_op3); 6610 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6611 ins_pipe(fstoreD_mem_zero); 6612 %} 6613 6614 // Store Float 6615 instruct storeF( memory mem, regF src) %{ 6616 match(Set mem (StoreF mem src)); 6617 ins_cost(MEMORY_REF_COST); 6618 6619 size(4); 6620 format %{ "STF $src,$mem" %} 6621 opcode(Assembler::stf_op3); 6622 ins_encode(simple_form3_mem_reg( mem, src ) ); 6623 ins_pipe(fstoreF_mem_reg); 6624 %} 6625 6626 instruct storeF0( memory mem, immF0 src) %{ 6627 match(Set mem (StoreF mem src)); 6628 ins_cost(MEMORY_REF_COST); 6629 6630 size(4); 6631 format %{ "STW $src,$mem\t! 
storeF0" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreF_mem_zero);
%}

// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_avoid_back_to_back(Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE);
  ins_pipe(ialu_reg);
%}

// Variant used when the type system proves the oop cannot be null,
// so the null check inside the encode sequence can be skipped.
instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ encode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ decode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors

instruct membar_acquire() %{
  match(MemBarAcquire);
  match(LoadFence);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-acquire" %}
  ins_encode( enc_membar_acquire );
  ins_pipe(long_memory_op);
%}

instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_release() %{
  match(MemBarRelease);
  match(StoreFence);
  ins_cost(4*MEMORY_REF_COST);

  size(0);
  format %{ "MEMBAR-release" %}
  ins_encode( enc_membar_release );
  ins_pipe(long_memory_op);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
  ins_encode( );
  ins_pipe(empty);
%}

instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(4*MEMORY_REF_COST);

  size(4);
  format %{ "MEMBAR-volatile" %}
  ins_encode( enc_membar_volatile );
  ins_pipe(long_memory_op);
%}

// Elided volatile barrier: a following atomic op already provides the
// required StoreLoad ordering (see Matcher::post_store_load_barrier).
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
  ins_encode( );
ins_pipe(empty); 6763 %} 6764 6765 instruct membar_storestore() %{ 6766 match(MemBarStoreStore); 6767 ins_cost(0); 6768 6769 size(0); 6770 format %{ "!MEMBAR-storestore (empty encoding)" %} 6771 ins_encode( ); 6772 ins_pipe(empty); 6773 %} 6774 6775 //----------Register Move Instructions----------------------------------------- 6776 instruct roundDouble_nop(regD dst) %{ 6777 match(Set dst (RoundDouble dst)); 6778 ins_cost(0); 6779 // SPARC results are already "rounded" (i.e., normal-format IEEE) 6780 ins_encode( ); 6781 ins_pipe(empty); 6782 %} 6783 6784 6785 instruct roundFloat_nop(regF dst) %{ 6786 match(Set dst (RoundFloat dst)); 6787 ins_cost(0); 6788 // SPARC results are already "rounded" (i.e., normal-format IEEE) 6789 ins_encode( ); 6790 ins_pipe(empty); 6791 %} 6792 6793 6794 // Cast Index to Pointer for unsafe natives 6795 instruct castX2P(iRegX src, iRegP dst) %{ 6796 match(Set dst (CastX2P src)); 6797 6798 format %{ "MOV $src,$dst\t! IntX->Ptr" %} 6799 ins_encode( form3_g0_rs2_rd_move( src, dst ) ); 6800 ins_pipe(ialu_reg); 6801 %} 6802 6803 // Cast Pointer to Index for unsafe natives 6804 instruct castP2X(iRegP src, iRegX dst) %{ 6805 match(Set dst (CastP2X src)); 6806 6807 format %{ "MOV $src,$dst\t! Ptr->IntX" %} 6808 ins_encode( form3_g0_rs2_rd_move( src, dst ) ); 6809 ins_pipe(ialu_reg); 6810 %} 6811 6812 instruct stfSSD(stackSlotD stkSlot, regD src) %{ 6813 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 6814 match(Set stkSlot src); // chain rule 6815 ins_cost(MEMORY_REF_COST); 6816 format %{ "STDF $src,$stkSlot\t!stk" %} 6817 opcode(Assembler::stdf_op3); 6818 ins_encode(simple_form3_mem_reg(stkSlot, src)); 6819 ins_pipe(fstoreD_stk_reg); 6820 %} 6821 6822 instruct ldfSSD(regD dst, stackSlotD stkSlot) %{ 6823 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 
6824 match(Set dst stkSlot); // chain rule 6825 ins_cost(MEMORY_REF_COST); 6826 format %{ "LDDF $stkSlot,$dst\t!stk" %} 6827 opcode(Assembler::lddf_op3); 6828 ins_encode(simple_form3_mem_reg(stkSlot, dst)); 6829 ins_pipe(floadD_stk); 6830 %} 6831 6832 instruct stfSSF(stackSlotF stkSlot, regF src) %{ 6833 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 6834 match(Set stkSlot src); // chain rule 6835 ins_cost(MEMORY_REF_COST); 6836 format %{ "STF $src,$stkSlot\t!stk" %} 6837 opcode(Assembler::stf_op3); 6838 ins_encode(simple_form3_mem_reg(stkSlot, src)); 6839 ins_pipe(fstoreF_stk_reg); 6840 %} 6841 6842 //----------Conditional Move--------------------------------------------------- 6843 // Conditional move 6844 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{ 6845 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6846 ins_cost(150); 6847 format %{ "MOV$cmp $pcc,$src,$dst" %} 6848 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6849 ins_pipe(ialu_reg); 6850 %} 6851 6852 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{ 6853 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6854 ins_cost(140); 6855 format %{ "MOV$cmp $pcc,$src,$dst" %} 6856 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6857 ins_pipe(ialu_imm); 6858 %} 6859 6860 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{ 6861 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6862 ins_cost(150); 6863 size(4); 6864 format %{ "MOV$cmp $icc,$src,$dst" %} 6865 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6866 ins_pipe(ialu_reg); 6867 %} 6868 6869 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{ 6870 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6871 ins_cost(140); 6872 size(4); 6873 format %{ "MOV$cmp $icc,$src,$dst" %} 6874 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6875 ins_pipe(ialu_imm); 6876 %} 6877 6878 
instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{ 6879 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6880 ins_cost(150); 6881 size(4); 6882 format %{ "MOV$cmp $icc,$src,$dst" %} 6883 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6884 ins_pipe(ialu_reg); 6885 %} 6886 6887 instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{ 6888 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6889 ins_cost(140); 6890 size(4); 6891 format %{ "MOV$cmp $icc,$src,$dst" %} 6892 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6893 ins_pipe(ialu_imm); 6894 %} 6895 6896 instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{ 6897 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src))); 6898 ins_cost(150); 6899 size(4); 6900 format %{ "MOV$cmp $fcc,$src,$dst" %} 6901 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6902 ins_pipe(ialu_reg); 6903 %} 6904 6905 instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{ 6906 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src))); 6907 ins_cost(140); 6908 size(4); 6909 format %{ "MOV$cmp $fcc,$src,$dst" %} 6910 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) ); 6911 ins_pipe(ialu_imm); 6912 %} 6913 6914 // Conditional move for RegN. Only cmov(reg,reg). 6915 instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{ 6916 match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src))); 6917 ins_cost(150); 6918 format %{ "MOV$cmp $pcc,$src,$dst" %} 6919 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6920 ins_pipe(ialu_reg); 6921 %} 6922 6923 // This instruction also works with CmpN so we don't need cmovNN_reg. 
6924 instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{ 6925 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); 6926 ins_cost(150); 6927 size(4); 6928 format %{ "MOV$cmp $icc,$src,$dst" %} 6929 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6930 ins_pipe(ialu_reg); 6931 %} 6932 6933 // This instruction also works with CmpN so we don't need cmovNN_reg. 6934 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{ 6935 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); 6936 ins_cost(150); 6937 size(4); 6938 format %{ "MOV$cmp $icc,$src,$dst" %} 6939 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6940 ins_pipe(ialu_reg); 6941 %} 6942 6943 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{ 6944 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src))); 6945 ins_cost(150); 6946 size(4); 6947 format %{ "MOV$cmp $fcc,$src,$dst" %} 6948 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6949 ins_pipe(ialu_reg); 6950 %} 6951 6952 // Conditional move 6953 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{ 6954 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6955 ins_cost(150); 6956 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6957 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6958 ins_pipe(ialu_reg); 6959 %} 6960 6961 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{ 6962 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6963 ins_cost(140); 6964 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6965 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6966 ins_pipe(ialu_imm); 6967 %} 6968 6969 // This instruction also works with CmpN so we don't need cmovPN_reg. 6970 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{ 6971 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6972 ins_cost(150); 6973 6974 size(4); 6975 format %{ "MOV$cmp $icc,$src,$dst\t! 
ptr" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
  ins_cost(140);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  // NOTE(review): reg-reg variant uses the ialu_imm pipe class, unlike the
  // other *_reg cmovs which use ialu_reg — confirm this is intentional.
  ins_pipe(ialu_imm);
%}

instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
  ins_cost(140);
  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst" %}
  ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_imm);
%}

// Conditional move
// NOTE(review): format text says FMOVD although the operands are regF —
// presumably a copy/paste from the double variant; verify against assembler output.
instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x101);
  format %{ "FMOVD$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVS$cmp $icc,$src,$dst" %}
  opcode(0x101);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_float_move);
%}

instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVS$cmp $icc,$src,$dst" %}
  opcode(0x101);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_float_move);
%}

// Conditional move,
instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FMOVF$cmp $fcc,$src,$dst" %}
  opcode(0x1);
  ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
  // NOTE(review): single-float cmov dispatched on the double-move pipe class — confirm.
  ins_pipe(int_conditional_double_move);
%}

// Conditional move
instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  opcode(0x102);
  format %{ "FMOVD$cmp $pcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(int_conditional_double_move);
%}

instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVD$cmp $icc,$src,$dst" %}
  opcode(0x102);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_double_move);
%}

instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "FMOVD$cmp $icc,$src,$dst" %}
  opcode(0x102);
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(int_conditional_double_move);
%}

// Conditional move,
instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);
  size(4);
  format %{ "FMOVD$cmp $fcc,$src,$dst" %}
  opcode(0x2);
  ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
  ins_pipe(int_conditional_double_move);
%}

// Conditional move
instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_reg);
%}

instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{
  match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
  ins_pipe(ialu_imm);
%}

instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}


instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
  ins_pipe(ialu_reg);
%}


instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
  ins_cost(150);

  size(4);
  format %{ "MOV$cmp $fcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
  ins_pipe(ialu_reg);
%}



//----------OS and Locking Instructions----------------------------------------

// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
instruct tlsLoadP(g2RegP dst) %{
  match(Set dst (ThreadLocal));

  size(0);
  ins_cost(0);
  format %{ "# TLS is in G2" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(ialu_none);
%}

instruct checkCastPP( iRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}


instruct castPP( iRegP dst ) %{
  match(Set dst (CastPP dst));
  format %{ "# castPP of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe(empty);
%}

instruct castII( iRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "# castII of $dst" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe(empty);
%}

//----------Arithmetic Instructions--------------------------------------------
// Addition Instructions
// Register Addition
instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  ins_encode %{
    __ add($src1$$Register, $src2$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Immediate Addition
instruct 
addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
  match(Set dst (AddI src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Pointer Register Addition
instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
  match(Set dst (AddP src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Pointer Immediate Addition
instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{
  match(Set dst (AddP src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_imm);
%}

// Long Addition
instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  match(Set dst (AddL src1 src2));

  size(4);
  format %{ "ADD $src1,$src2,$dst\t! long" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
  ins_pipe(ialu_reg_reg);
%}

instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
  match(Set dst (AddL src1 con));

  size(4);
  format %{ "ADD $src1,$con,$dst" %}
  opcode(Assembler::add_op3, Assembler::arith_op);
  ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
  ins_pipe(ialu_reg_imm);
%}

//----------Conditional_store--------------------------------------------------
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flags (EQ) on success. Implemented with a CASA on Sparc.

// LoadP-locked. Same as a regular pointer load when used with a compare-swap
instruct loadPLocked(iRegP dst, memory mem) %{
  match(Set dst (LoadPLocked mem));
  ins_cost(MEMORY_REF_COST);

#ifndef _LP64
  size(4);
  format %{ "LDUW $mem,$dst\t! ptr" %}
  opcode(Assembler::lduw_op3, 0, REGP_OP);
#else
  format %{ "LDX $mem,$dst\t! ptr" %}
  opcode(Assembler::ldx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( mem, dst ) );
  ins_pipe(iload_mem);
%}

instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
  match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
  effect( KILL newval );
  format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
            "CMP R_G3,$oldval\t\t! See if we made progress" %}
  ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
  ins_pipe( long_memory_op );
%}

// Conditional-store of an int value.
instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
  match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
  effect( KILL newval );
  format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
            "CMP $oldval,$newval\t\t! See if we made progress" %}
  ins_encode( enc_cas(mem_ptr,oldval,newval) );
  ins_pipe( long_memory_op );
%}

// Conditional-store of a long value.
instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{
  match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval)));
  effect( KILL newval );
  format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
            "CMP $oldval,$newval\t\t! See if we made progress" %}
  ins_encode( enc_cas(mem_ptr,oldval,newval) );
  ins_pipe( long_memory_op );
%}

// No flag versions for CompareAndSwap{P,I,L} because matcher can't match them

instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
  predicate(VM_Version::supports_cx8());
  match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne xcc,R_G0,$res"
  %}
  ins_encode( enc_casx(mem_ptr, oldval, newval),
              enc_lflags_ne_to_boolean(res) );
  ins_pipe( long_memory_op );
%}


instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne icc,R_G0,$res"
  %}
  ins_encode( enc_casi(mem_ptr, oldval, newval),
              enc_iflags_ne_to_boolean(res) );
  ins_pipe( long_memory_op );
%}

instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
#ifdef _LP64
  predicate(VM_Version::supports_cx8());
#endif
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne xcc,R_G0,$res"
  %}
#ifdef _LP64
  ins_encode( enc_casx(mem_ptr, oldval, newval),
              enc_lflags_ne_to_boolean(res) );
#else
  ins_encode( enc_casi(mem_ptr, oldval, newval),
              enc_iflags_ne_to_boolean(res) );
#endif
  ins_pipe( long_memory_op );
%}

instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect( USE mem_ptr, KILL ccr, KILL tmp1);
  format %{
    "MOV $newval,O7\n\t"
    "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
    "CMP $oldval,O7\t\t! See if we made progress\n\t"
    "MOV 1,$res\n\t"
    "MOVne icc,R_G0,$res"
  %}
  ins_encode( enc_casi(mem_ptr, oldval, newval),
              enc_iflags_ne_to_boolean(res) );
  ins_pipe( long_memory_op );
%}

instruct xchgI( memory mem, iRegI newval) %{
  match(Set newval (GetAndSetI mem newval));
  format %{ "SWAP [$mem],$newval" %}
  size(4);
  ins_encode %{
    __ swap($mem$$Address, $newval$$Register);
  %}
  ins_pipe( long_memory_op );
%}

#ifndef _LP64
instruct xchgP( memory mem, iRegP newval) %{
  match(Set newval (GetAndSetP mem newval));
  format %{ "SWAP [$mem],$newval" %}
  size(4);
  ins_encode %{
    __ swap($mem$$Address, $newval$$Register);
  %}
  ins_pipe( long_memory_op );
%}
#endif

instruct xchgN( memory mem, iRegN newval) %{
  match(Set newval (GetAndSetN mem newval));
  format %{ "SWAP [$mem],$newval" %}
  size(4);
  ins_encode %{
    __ swap($mem$$Address, $newval$$Register);
  %}
  ins_pipe( long_memory_op );
%}

//---------------------
7430 // Subtraction Instructions 7431 // Register Subtraction 7432 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7433 match(Set dst (SubI src1 src2)); 7434 7435 size(4); 7436 format %{ "SUB $src1,$src2,$dst" %} 7437 opcode(Assembler::sub_op3, Assembler::arith_op); 7438 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7439 ins_pipe(ialu_reg_reg); 7440 %} 7441 7442 // Immediate Subtraction 7443 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7444 match(Set dst (SubI src1 src2)); 7445 7446 size(4); 7447 format %{ "SUB $src1,$src2,$dst" %} 7448 opcode(Assembler::sub_op3, Assembler::arith_op); 7449 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7450 ins_pipe(ialu_reg_imm); 7451 %} 7452 7453 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{ 7454 match(Set dst (SubI zero src2)); 7455 7456 size(4); 7457 format %{ "NEG $src2,$dst" %} 7458 opcode(Assembler::sub_op3, Assembler::arith_op); 7459 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7460 ins_pipe(ialu_zero_reg); 7461 %} 7462 7463 // Long subtraction 7464 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7465 match(Set dst (SubL src1 src2)); 7466 7467 size(4); 7468 format %{ "SUB $src1,$src2,$dst\t! long" %} 7469 opcode(Assembler::sub_op3, Assembler::arith_op); 7470 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7471 ins_pipe(ialu_reg_reg); 7472 %} 7473 7474 // Immediate Subtraction 7475 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7476 match(Set dst (SubL src1 con)); 7477 7478 size(4); 7479 format %{ "SUB $src1,$con,$dst\t! long" %} 7480 opcode(Assembler::sub_op3, Assembler::arith_op); 7481 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7482 ins_pipe(ialu_reg_imm); 7483 %} 7484 7485 // Long negation 7486 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{ 7487 match(Set dst (SubL zero src2)); 7488 7489 size(4); 7490 format %{ "NEG $src2,$dst\t! 
long" %} 7491 opcode(Assembler::sub_op3, Assembler::arith_op); 7492 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7493 ins_pipe(ialu_zero_reg); 7494 %} 7495 7496 // Multiplication Instructions 7497 // Integer Multiplication 7498 // Register Multiplication 7499 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7500 match(Set dst (MulI src1 src2)); 7501 7502 size(4); 7503 format %{ "MULX $src1,$src2,$dst" %} 7504 opcode(Assembler::mulx_op3, Assembler::arith_op); 7505 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7506 ins_pipe(imul_reg_reg); 7507 %} 7508 7509 // Immediate Multiplication 7510 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7511 match(Set dst (MulI src1 src2)); 7512 7513 size(4); 7514 format %{ "MULX $src1,$src2,$dst" %} 7515 opcode(Assembler::mulx_op3, Assembler::arith_op); 7516 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7517 ins_pipe(imul_reg_imm); 7518 %} 7519 7520 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7521 match(Set dst (MulL src1 src2)); 7522 ins_cost(DEFAULT_COST * 5); 7523 size(4); 7524 format %{ "MULX $src1,$src2,$dst\t! 
long" %} 7525 opcode(Assembler::mulx_op3, Assembler::arith_op); 7526 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7527 ins_pipe(mulL_reg_reg); 7528 %} 7529 7530 // Immediate Multiplication 7531 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7532 match(Set dst (MulL src1 src2)); 7533 ins_cost(DEFAULT_COST * 5); 7534 size(4); 7535 format %{ "MULX $src1,$src2,$dst" %} 7536 opcode(Assembler::mulx_op3, Assembler::arith_op); 7537 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7538 ins_pipe(mulL_reg_imm); 7539 %} 7540 7541 // Integer Division 7542 // Register Division 7543 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{ 7544 match(Set dst (DivI src1 src2)); 7545 ins_cost((2+71)*DEFAULT_COST); 7546 7547 format %{ "SRA $src2,0,$src2\n\t" 7548 "SRA $src1,0,$src1\n\t" 7549 "SDIVX $src1,$src2,$dst" %} 7550 ins_encode( idiv_reg( src1, src2, dst ) ); 7551 ins_pipe(sdiv_reg_reg); 7552 %} 7553 7554 // Immediate Division 7555 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{ 7556 match(Set dst (DivI src1 src2)); 7557 ins_cost((2+71)*DEFAULT_COST); 7558 7559 format %{ "SRA $src1,0,$src1\n\t" 7560 "SDIVX $src1,$src2,$dst" %} 7561 ins_encode( idiv_imm( src1, src2, dst ) ); 7562 ins_pipe(sdiv_reg_imm); 7563 %} 7564 7565 //----------Div-By-10-Expansion------------------------------------------------ 7566 // Extract hi bits of a 32x32->64 bit multiply. 7567 // Expand rule only, not matched 7568 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{ 7569 effect( DEF dst, USE src1, USE src2 ); 7570 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t" 7571 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %} 7572 ins_encode( enc_mul_hi(dst,src1,src2)); 7573 ins_pipe(sdiv_reg_reg); 7574 %} 7575 7576 // Magic constant, reciprocal of 10 7577 instruct loadConI_x66666667(iRegIsafe dst) %{ 7578 effect( DEF dst ); 7579 7580 size(8); 7581 format %{ "SET 0x66666667,$dst\t! 
Used in div-by-10" %} 7582 ins_encode( Set32(0x66666667, dst) ); 7583 ins_pipe(ialu_hi_lo_reg); 7584 %} 7585 7586 // Register Shift Right Arithmetic Long by 32-63 7587 instruct sra_31( iRegI dst, iRegI src ) %{ 7588 effect( DEF dst, USE src ); 7589 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %} 7590 ins_encode( form3_rs1_rd_copysign_hi(src,dst) ); 7591 ins_pipe(ialu_reg_reg); 7592 %} 7593 7594 // Arithmetic Shift Right by 8-bit immediate 7595 instruct sra_reg_2( iRegI dst, iRegI src ) %{ 7596 effect( DEF dst, USE src ); 7597 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %} 7598 opcode(Assembler::sra_op3, Assembler::arith_op); 7599 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) ); 7600 ins_pipe(ialu_reg_imm); 7601 %} 7602 7603 // Integer DIV with 10 7604 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{ 7605 match(Set dst (DivI src div)); 7606 ins_cost((6+6)*DEFAULT_COST); 7607 expand %{ 7608 iRegIsafe tmp1; // Killed temps; 7609 iRegIsafe tmp2; // Killed temps; 7610 iRegI tmp3; // Killed temps; 7611 iRegI tmp4; // Killed temps; 7612 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1 7613 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2 7614 sra_31( tmp3, src ); // SRA src,31 -> tmp3 7615 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4 7616 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst 7617 %} 7618 %} 7619 7620 // Register Long Division 7621 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7622 match(Set dst (DivL src1 src2)); 7623 ins_cost(DEFAULT_COST*71); 7624 size(4); 7625 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7626 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7627 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7628 ins_pipe(divL_reg_reg); 7629 %} 7630 7631 // Register Long Division 7632 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7633 match(Set dst (DivL src1 src2)); 7634 ins_cost(DEFAULT_COST*71); 7635 size(4); 7636 format %{ "SDIVX $src1,$src2,$dst\t! 
long" %} 7637 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7638 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7639 ins_pipe(divL_reg_imm); 7640 %} 7641 7642 // Integer Remainder 7643 // Register Remainder 7644 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{ 7645 match(Set dst (ModI src1 src2)); 7646 effect( KILL ccr, KILL temp); 7647 7648 format %{ "SREM $src1,$src2,$dst" %} 7649 ins_encode( irem_reg(src1, src2, dst, temp) ); 7650 ins_pipe(sdiv_reg_reg); 7651 %} 7652 7653 // Immediate Remainder 7654 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{ 7655 match(Set dst (ModI src1 src2)); 7656 effect( KILL ccr, KILL temp); 7657 7658 format %{ "SREM $src1,$src2,$dst" %} 7659 ins_encode( irem_imm(src1, src2, dst, temp) ); 7660 ins_pipe(sdiv_reg_imm); 7661 %} 7662 7663 // Register Long Remainder 7664 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7665 effect(DEF dst, USE src1, USE src2); 7666 size(4); 7667 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7668 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7669 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7670 ins_pipe(divL_reg_reg); 7671 %} 7672 7673 // Register Long Division 7674 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7675 effect(DEF dst, USE src1, USE src2); 7676 size(4); 7677 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7678 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7679 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7680 ins_pipe(divL_reg_imm); 7681 %} 7682 7683 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7684 effect(DEF dst, USE src1, USE src2); 7685 size(4); 7686 format %{ "MULX $src1,$src2,$dst\t! 
long" %} 7687 opcode(Assembler::mulx_op3, Assembler::arith_op); 7688 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7689 ins_pipe(mulL_reg_reg); 7690 %} 7691 7692 // Immediate Multiplication 7693 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7694 effect(DEF dst, USE src1, USE src2); 7695 size(4); 7696 format %{ "MULX $src1,$src2,$dst" %} 7697 opcode(Assembler::mulx_op3, Assembler::arith_op); 7698 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7699 ins_pipe(mulL_reg_imm); 7700 %} 7701 7702 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7703 effect(DEF dst, USE src1, USE src2); 7704 size(4); 7705 format %{ "SUB $src1,$src2,$dst\t! long" %} 7706 opcode(Assembler::sub_op3, Assembler::arith_op); 7707 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7708 ins_pipe(ialu_reg_reg); 7709 %} 7710 7711 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{ 7712 effect(DEF dst, USE src1, USE src2); 7713 size(4); 7714 format %{ "SUB $src1,$src2,$dst\t! 
long" %} 7715 opcode(Assembler::sub_op3, Assembler::arith_op); 7716 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7717 ins_pipe(ialu_reg_reg); 7718 %} 7719 7720 // Register Long Remainder 7721 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7722 match(Set dst (ModL src1 src2)); 7723 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7724 expand %{ 7725 iRegL tmp1; 7726 iRegL tmp2; 7727 divL_reg_reg_1(tmp1, src1, src2); 7728 mulL_reg_reg_1(tmp2, tmp1, src2); 7729 subL_reg_reg_1(dst, src1, tmp2); 7730 %} 7731 %} 7732 7733 // Register Long Remainder 7734 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7735 match(Set dst (ModL src1 src2)); 7736 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7737 expand %{ 7738 iRegL tmp1; 7739 iRegL tmp2; 7740 divL_reg_imm13_1(tmp1, src1, src2); 7741 mulL_reg_imm13_1(tmp2, tmp1, src2); 7742 subL_reg_reg_2 (dst, src1, tmp2); 7743 %} 7744 %} 7745 7746 // Integer Shift Instructions 7747 // Register Shift Left 7748 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7749 match(Set dst (LShiftI src1 src2)); 7750 7751 size(4); 7752 format %{ "SLL $src1,$src2,$dst" %} 7753 opcode(Assembler::sll_op3, Assembler::arith_op); 7754 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7755 ins_pipe(ialu_reg_reg); 7756 %} 7757 7758 // Register Shift Left Immediate 7759 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7760 match(Set dst (LShiftI src1 src2)); 7761 7762 size(4); 7763 format %{ "SLL $src1,$src2,$dst" %} 7764 opcode(Assembler::sll_op3, Assembler::arith_op); 7765 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7766 ins_pipe(ialu_reg_imm); 7767 %} 7768 7769 // Register Shift Left 7770 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7771 match(Set dst (LShiftL src1 src2)); 7772 7773 size(4); 7774 format %{ "SLLX $src1,$src2,$dst" %} 7775 opcode(Assembler::sllx_op3, Assembler::arith_op); 7776 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7777 ins_pipe(ialu_reg_reg); 7778 %} 7779 7780 // 
Register Shift Left Immediate 7781 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7782 match(Set dst (LShiftL src1 src2)); 7783 7784 size(4); 7785 format %{ "SLLX $src1,$src2,$dst" %} 7786 opcode(Assembler::sllx_op3, Assembler::arith_op); 7787 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7788 ins_pipe(ialu_reg_imm); 7789 %} 7790 7791 // Register Arithmetic Shift Right 7792 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7793 match(Set dst (RShiftI src1 src2)); 7794 size(4); 7795 format %{ "SRA $src1,$src2,$dst" %} 7796 opcode(Assembler::sra_op3, Assembler::arith_op); 7797 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7798 ins_pipe(ialu_reg_reg); 7799 %} 7800 7801 // Register Arithmetic Shift Right Immediate 7802 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7803 match(Set dst (RShiftI src1 src2)); 7804 7805 size(4); 7806 format %{ "SRA $src1,$src2,$dst" %} 7807 opcode(Assembler::sra_op3, Assembler::arith_op); 7808 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7809 ins_pipe(ialu_reg_imm); 7810 %} 7811 7812 // Register Shift Right Arithmatic Long 7813 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7814 match(Set dst (RShiftL src1 src2)); 7815 7816 size(4); 7817 format %{ "SRAX $src1,$src2,$dst" %} 7818 opcode(Assembler::srax_op3, Assembler::arith_op); 7819 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7820 ins_pipe(ialu_reg_reg); 7821 %} 7822 7823 // Register Shift Left Immediate 7824 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7825 match(Set dst (RShiftL src1 src2)); 7826 7827 size(4); 7828 format %{ "SRAX $src1,$src2,$dst" %} 7829 opcode(Assembler::srax_op3, Assembler::arith_op); 7830 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7831 ins_pipe(ialu_reg_imm); 7832 %} 7833 7834 // Register Shift Right 7835 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7836 match(Set dst (URShiftI src1 src2)); 7837 7838 size(4); 7839 format %{ "SRL 
$src1,$src2,$dst" %} 7840 opcode(Assembler::srl_op3, Assembler::arith_op); 7841 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7842 ins_pipe(ialu_reg_reg); 7843 %} 7844 7845 // Register Shift Right Immediate 7846 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7847 match(Set dst (URShiftI src1 src2)); 7848 7849 size(4); 7850 format %{ "SRL $src1,$src2,$dst" %} 7851 opcode(Assembler::srl_op3, Assembler::arith_op); 7852 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7853 ins_pipe(ialu_reg_imm); 7854 %} 7855 7856 // Register Shift Right 7857 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7858 match(Set dst (URShiftL src1 src2)); 7859 7860 size(4); 7861 format %{ "SRLX $src1,$src2,$dst" %} 7862 opcode(Assembler::srlx_op3, Assembler::arith_op); 7863 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7864 ins_pipe(ialu_reg_reg); 7865 %} 7866 7867 // Register Shift Right Immediate 7868 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7869 match(Set dst (URShiftL src1 src2)); 7870 7871 size(4); 7872 format %{ "SRLX $src1,$src2,$dst" %} 7873 opcode(Assembler::srlx_op3, Assembler::arith_op); 7874 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7875 ins_pipe(ialu_reg_imm); 7876 %} 7877 7878 // Register Shift Right Immediate with a CastP2X 7879 #ifdef _LP64 7880 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{ 7881 match(Set dst (URShiftL (CastP2X src1) src2)); 7882 size(4); 7883 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %} 7884 opcode(Assembler::srlx_op3, Assembler::arith_op); 7885 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7886 ins_pipe(ialu_reg_imm); 7887 %} 7888 #else 7889 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{ 7890 match(Set dst (URShiftI (CastP2X src1) src2)); 7891 size(4); 7892 format %{ "SRL $src1,$src2,$dst\t! 
Cast ptr $src1 to int and shift" %} 7893 opcode(Assembler::srl_op3, Assembler::arith_op); 7894 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7895 ins_pipe(ialu_reg_imm); 7896 %} 7897 #endif 7898 7899 7900 //----------Floating Point Arithmetic Instructions----------------------------- 7901 7902 // Add float single precision 7903 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ 7904 match(Set dst (AddF src1 src2)); 7905 7906 size(4); 7907 format %{ "FADDS $src1,$src2,$dst" %} 7908 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf); 7909 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7910 ins_pipe(faddF_reg_reg); 7911 %} 7912 7913 // Add float double precision 7914 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ 7915 match(Set dst (AddD src1 src2)); 7916 7917 size(4); 7918 format %{ "FADDD $src1,$src2,$dst" %} 7919 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 7920 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7921 ins_pipe(faddD_reg_reg); 7922 %} 7923 7924 // Sub float single precision 7925 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ 7926 match(Set dst (SubF src1 src2)); 7927 7928 size(4); 7929 format %{ "FSUBS $src1,$src2,$dst" %} 7930 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf); 7931 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7932 ins_pipe(faddF_reg_reg); 7933 %} 7934 7935 // Sub float double precision 7936 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ 7937 match(Set dst (SubD src1 src2)); 7938 7939 size(4); 7940 format %{ "FSUBD $src1,$src2,$dst" %} 7941 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 7942 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7943 ins_pipe(faddD_reg_reg); 7944 %} 7945 7946 // Mul float single precision 7947 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ 7948 match(Set dst (MulF src1 src2)); 7949 7950 size(4); 7951 format %{ "FMULS $src1,$src2,$dst" 
%} 7952 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf); 7953 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7954 ins_pipe(fmulF_reg_reg); 7955 %} 7956 7957 // Mul float double precision 7958 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ 7959 match(Set dst (MulD src1 src2)); 7960 7961 size(4); 7962 format %{ "FMULD $src1,$src2,$dst" %} 7963 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 7964 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7965 ins_pipe(fmulD_reg_reg); 7966 %} 7967 7968 // Div float single precision 7969 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ 7970 match(Set dst (DivF src1 src2)); 7971 7972 size(4); 7973 format %{ "FDIVS $src1,$src2,$dst" %} 7974 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf); 7975 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7976 ins_pipe(fdivF_reg_reg); 7977 %} 7978 7979 // Div float double precision 7980 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ 7981 match(Set dst (DivD src1 src2)); 7982 7983 size(4); 7984 format %{ "FDIVD $src1,$src2,$dst" %} 7985 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf); 7986 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7987 ins_pipe(fdivD_reg_reg); 7988 %} 7989 7990 // Absolute float double precision 7991 instruct absD_reg(regD dst, regD src) %{ 7992 match(Set dst (AbsD src)); 7993 7994 format %{ "FABSd $src,$dst" %} 7995 ins_encode(fabsd(dst, src)); 7996 ins_pipe(faddD_reg); 7997 %} 7998 7999 // Absolute float single precision 8000 instruct absF_reg(regF dst, regF src) %{ 8001 match(Set dst (AbsF src)); 8002 8003 format %{ "FABSs $src,$dst" %} 8004 ins_encode(fabss(dst, src)); 8005 ins_pipe(faddF_reg); 8006 %} 8007 8008 instruct negF_reg(regF dst, regF src) %{ 8009 match(Set dst (NegF src)); 8010 8011 size(4); 8012 format %{ "FNEGs $src,$dst" %} 8013 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf); 8014 
ins_encode(form3_opf_rs2F_rdF(src, dst)); 8015 ins_pipe(faddF_reg); 8016 %} 8017 8018 instruct negD_reg(regD dst, regD src) %{ 8019 match(Set dst (NegD src)); 8020 8021 format %{ "FNEGd $src,$dst" %} 8022 ins_encode(fnegd(dst, src)); 8023 ins_pipe(faddD_reg); 8024 %} 8025 8026 // Sqrt float double precision 8027 instruct sqrtF_reg_reg(regF dst, regF src) %{ 8028 match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); 8029 8030 size(4); 8031 format %{ "FSQRTS $src,$dst" %} 8032 ins_encode(fsqrts(dst, src)); 8033 ins_pipe(fdivF_reg_reg); 8034 %} 8035 8036 // Sqrt float double precision 8037 instruct sqrtD_reg_reg(regD dst, regD src) %{ 8038 match(Set dst (SqrtD src)); 8039 8040 size(4); 8041 format %{ "FSQRTD $src,$dst" %} 8042 ins_encode(fsqrtd(dst, src)); 8043 ins_pipe(fdivD_reg_reg); 8044 %} 8045 8046 //----------Logical Instructions----------------------------------------------- 8047 // And Instructions 8048 // Register And 8049 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8050 match(Set dst (AndI src1 src2)); 8051 8052 size(4); 8053 format %{ "AND $src1,$src2,$dst" %} 8054 opcode(Assembler::and_op3, Assembler::arith_op); 8055 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8056 ins_pipe(ialu_reg_reg); 8057 %} 8058 8059 // Immediate And 8060 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8061 match(Set dst (AndI src1 src2)); 8062 8063 size(4); 8064 format %{ "AND $src1,$src2,$dst" %} 8065 opcode(Assembler::and_op3, Assembler::arith_op); 8066 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8067 ins_pipe(ialu_reg_imm); 8068 %} 8069 8070 // Register And Long 8071 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8072 match(Set dst (AndL src1 src2)); 8073 8074 ins_cost(DEFAULT_COST); 8075 size(4); 8076 format %{ "AND $src1,$src2,$dst\t! 
long" %} 8077 opcode(Assembler::and_op3, Assembler::arith_op); 8078 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8079 ins_pipe(ialu_reg_reg); 8080 %} 8081 8082 instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8083 match(Set dst (AndL src1 con)); 8084 8085 ins_cost(DEFAULT_COST); 8086 size(4); 8087 format %{ "AND $src1,$con,$dst\t! long" %} 8088 opcode(Assembler::and_op3, Assembler::arith_op); 8089 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8090 ins_pipe(ialu_reg_imm); 8091 %} 8092 8093 // Or Instructions 8094 // Register Or 8095 instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8096 match(Set dst (OrI src1 src2)); 8097 8098 size(4); 8099 format %{ "OR $src1,$src2,$dst" %} 8100 opcode(Assembler::or_op3, Assembler::arith_op); 8101 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8102 ins_pipe(ialu_reg_reg); 8103 %} 8104 8105 // Immediate Or 8106 instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8107 match(Set dst (OrI src1 src2)); 8108 8109 size(4); 8110 format %{ "OR $src1,$src2,$dst" %} 8111 opcode(Assembler::or_op3, Assembler::arith_op); 8112 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8113 ins_pipe(ialu_reg_imm); 8114 %} 8115 8116 // Register Or Long 8117 instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8118 match(Set dst (OrL src1 src2)); 8119 8120 ins_cost(DEFAULT_COST); 8121 size(4); 8122 format %{ "OR $src1,$src2,$dst\t! long" %} 8123 opcode(Assembler::or_op3, Assembler::arith_op); 8124 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8125 ins_pipe(ialu_reg_reg); 8126 %} 8127 8128 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8129 match(Set dst (OrL src1 con)); 8130 ins_cost(DEFAULT_COST*2); 8131 8132 ins_cost(DEFAULT_COST); 8133 size(4); 8134 format %{ "OR $src1,$con,$dst\t! 
long" %} 8135 opcode(Assembler::or_op3, Assembler::arith_op); 8136 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8137 ins_pipe(ialu_reg_imm); 8138 %} 8139 8140 #ifndef _LP64 8141 8142 // Use sp_ptr_RegP to match G2 (TLS register) without spilling. 8143 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{ 8144 match(Set dst (OrI src1 (CastP2X src2))); 8145 8146 size(4); 8147 format %{ "OR $src1,$src2,$dst" %} 8148 opcode(Assembler::or_op3, Assembler::arith_op); 8149 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8150 ins_pipe(ialu_reg_reg); 8151 %} 8152 8153 #else 8154 8155 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{ 8156 match(Set dst (OrL src1 (CastP2X src2))); 8157 8158 ins_cost(DEFAULT_COST); 8159 size(4); 8160 format %{ "OR $src1,$src2,$dst\t! long" %} 8161 opcode(Assembler::or_op3, Assembler::arith_op); 8162 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8163 ins_pipe(ialu_reg_reg); 8164 %} 8165 8166 #endif 8167 8168 // Xor Instructions 8169 // Register Xor 8170 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8171 match(Set dst (XorI src1 src2)); 8172 8173 size(4); 8174 format %{ "XOR $src1,$src2,$dst" %} 8175 opcode(Assembler::xor_op3, Assembler::arith_op); 8176 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8177 ins_pipe(ialu_reg_reg); 8178 %} 8179 8180 // Immediate Xor 8181 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8182 match(Set dst (XorI src1 src2)); 8183 8184 size(4); 8185 format %{ "XOR $src1,$src2,$dst" %} 8186 opcode(Assembler::xor_op3, Assembler::arith_op); 8187 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8188 ins_pipe(ialu_reg_imm); 8189 %} 8190 8191 // Register Xor Long 8192 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8193 match(Set dst (XorL src1 src2)); 8194 8195 ins_cost(DEFAULT_COST); 8196 size(4); 8197 format %{ "XOR $src1,$src2,$dst\t! 
long" %} 8198 opcode(Assembler::xor_op3, Assembler::arith_op); 8199 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8200 ins_pipe(ialu_reg_reg); 8201 %} 8202 8203 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8204 match(Set dst (XorL src1 con)); 8205 8206 ins_cost(DEFAULT_COST); 8207 size(4); 8208 format %{ "XOR $src1,$con,$dst\t! long" %} 8209 opcode(Assembler::xor_op3, Assembler::arith_op); 8210 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8211 ins_pipe(ialu_reg_imm); 8212 %} 8213 8214 //----------Convert to Boolean------------------------------------------------- 8215 // Nice hack for 32-bit tests but doesn't work for 8216 // 64-bit pointers. 8217 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{ 8218 match(Set dst (Conv2B src)); 8219 effect( KILL ccr ); 8220 ins_cost(DEFAULT_COST*2); 8221 format %{ "CMP R_G0,$src\n\t" 8222 "ADDX R_G0,0,$dst" %} 8223 ins_encode( enc_to_bool( src, dst ) ); 8224 ins_pipe(ialu_reg_ialu); 8225 %} 8226 8227 #ifndef _LP64 8228 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{ 8229 match(Set dst (Conv2B src)); 8230 effect( KILL ccr ); 8231 ins_cost(DEFAULT_COST*2); 8232 format %{ "CMP R_G0,$src\n\t" 8233 "ADDX R_G0,0,$dst" %} 8234 ins_encode( enc_to_bool( src, dst ) ); 8235 ins_pipe(ialu_reg_ialu); 8236 %} 8237 #else 8238 instruct convP2B( iRegI dst, iRegP src ) %{ 8239 match(Set dst (Conv2B src)); 8240 ins_cost(DEFAULT_COST*2); 8241 format %{ "MOV $src,$dst\n\t" 8242 "MOVRNZ $src,1,$dst" %} 8243 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) ); 8244 ins_pipe(ialu_clr_and_mover); 8245 %} 8246 #endif 8247 8248 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{ 8249 match(Set dst (CmpLTMask src zero)); 8250 effect(KILL ccr); 8251 size(4); 8252 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %} 8253 ins_encode %{ 8254 __ sra($src$$Register, 31, $dst$$Register); 8255 %} 8256 ins_pipe(ialu_reg_imm); 8257 %} 8258 8259 instruct cmpLTMask_reg_reg( iRegI dst, 
iRegI p, iRegI q, flagsReg ccr ) %{ 8260 match(Set dst (CmpLTMask p q)); 8261 effect( KILL ccr ); 8262 ins_cost(DEFAULT_COST*4); 8263 format %{ "CMP $p,$q\n\t" 8264 "MOV #0,$dst\n\t" 8265 "BLT,a .+8\n\t" 8266 "MOV #-1,$dst" %} 8267 ins_encode( enc_ltmask(p,q,dst) ); 8268 ins_pipe(ialu_reg_reg_ialu); 8269 %} 8270 8271 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{ 8272 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); 8273 effect(KILL ccr, TEMP tmp); 8274 ins_cost(DEFAULT_COST*3); 8275 8276 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" 8277 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" 8278 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %} 8279 ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp)); 8280 ins_pipe(cadd_cmpltmask); 8281 %} 8282 8283 instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{ 8284 match(Set p (AndI (CmpLTMask p q) y)); 8285 effect(KILL ccr); 8286 ins_cost(DEFAULT_COST*3); 8287 8288 format %{ "CMP $p,$q\n\t" 8289 "MOV $y,$p\n\t" 8290 "MOVge G0,$p" %} 8291 ins_encode %{ 8292 __ cmp($p$$Register, $q$$Register); 8293 __ mov($y$$Register, $p$$Register); 8294 __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register); 8295 %} 8296 ins_pipe(ialu_reg_reg_ialu); 8297 %} 8298 8299 //----------------------------------------------------------------- 8300 // Direct raw moves between float and general registers using VIS3. 8301 8302 // ins_pipe(faddF_reg); 8303 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{ 8304 predicate(UseVIS >= 3); 8305 match(Set dst (MoveF2I src)); 8306 8307 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %} 8308 ins_encode %{ 8309 __ movstouw($src$$FloatRegister, $dst$$Register); 8310 %} 8311 ins_pipe(ialu_reg_reg); 8312 %} 8313 8314 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{ 8315 predicate(UseVIS >= 3); 8316 match(Set dst (MoveI2F src)); 8317 8318 format %{ "MOVWTOS $src,$dst\t! 
MoveI2F" %} 8319 ins_encode %{ 8320 __ movwtos($src$$Register, $dst$$FloatRegister); 8321 %} 8322 ins_pipe(ialu_reg_reg); 8323 %} 8324 8325 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{ 8326 predicate(UseVIS >= 3); 8327 match(Set dst (MoveD2L src)); 8328 8329 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %} 8330 ins_encode %{ 8331 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register); 8332 %} 8333 ins_pipe(ialu_reg_reg); 8334 %} 8335 8336 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{ 8337 predicate(UseVIS >= 3); 8338 match(Set dst (MoveL2D src)); 8339 8340 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %} 8341 ins_encode %{ 8342 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg)); 8343 %} 8344 ins_pipe(ialu_reg_reg); 8345 %} 8346 8347 8348 // Raw moves between float and general registers using stack. 8349 8350 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{ 8351 match(Set dst (MoveF2I src)); 8352 effect(DEF dst, USE src); 8353 ins_cost(MEMORY_REF_COST); 8354 8355 size(4); 8356 format %{ "LDUW $src,$dst\t! MoveF2I" %} 8357 opcode(Assembler::lduw_op3); 8358 ins_encode(simple_form3_mem_reg( src, dst ) ); 8359 ins_pipe(iload_mem); 8360 %} 8361 8362 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{ 8363 match(Set dst (MoveI2F src)); 8364 effect(DEF dst, USE src); 8365 ins_cost(MEMORY_REF_COST); 8366 8367 size(4); 8368 format %{ "LDF $src,$dst\t! MoveI2F" %} 8369 opcode(Assembler::ldf_op3); 8370 ins_encode(simple_form3_mem_reg(src, dst)); 8371 ins_pipe(floadF_stk); 8372 %} 8373 8374 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{ 8375 match(Set dst (MoveD2L src)); 8376 effect(DEF dst, USE src); 8377 ins_cost(MEMORY_REF_COST); 8378 8379 size(4); 8380 format %{ "LDX $src,$dst\t! 
MoveD2L" %} 8381 opcode(Assembler::ldx_op3); 8382 ins_encode(simple_form3_mem_reg( src, dst ) ); 8383 ins_pipe(iload_mem); 8384 %} 8385 8386 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{ 8387 match(Set dst (MoveL2D src)); 8388 effect(DEF dst, USE src); 8389 ins_cost(MEMORY_REF_COST); 8390 8391 size(4); 8392 format %{ "LDDF $src,$dst\t! MoveL2D" %} 8393 opcode(Assembler::lddf_op3); 8394 ins_encode(simple_form3_mem_reg(src, dst)); 8395 ins_pipe(floadD_stk); 8396 %} 8397 8398 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{ 8399 match(Set dst (MoveF2I src)); 8400 effect(DEF dst, USE src); 8401 ins_cost(MEMORY_REF_COST); 8402 8403 size(4); 8404 format %{ "STF $src,$dst\t! MoveF2I" %} 8405 opcode(Assembler::stf_op3); 8406 ins_encode(simple_form3_mem_reg(dst, src)); 8407 ins_pipe(fstoreF_stk_reg); 8408 %} 8409 8410 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{ 8411 match(Set dst (MoveI2F src)); 8412 effect(DEF dst, USE src); 8413 ins_cost(MEMORY_REF_COST); 8414 8415 size(4); 8416 format %{ "STW $src,$dst\t! MoveI2F" %} 8417 opcode(Assembler::stw_op3); 8418 ins_encode(simple_form3_mem_reg( dst, src ) ); 8419 ins_pipe(istore_mem_reg); 8420 %} 8421 8422 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{ 8423 match(Set dst (MoveD2L src)); 8424 effect(DEF dst, USE src); 8425 ins_cost(MEMORY_REF_COST); 8426 8427 size(4); 8428 format %{ "STDF $src,$dst\t! MoveD2L" %} 8429 opcode(Assembler::stdf_op3); 8430 ins_encode(simple_form3_mem_reg(dst, src)); 8431 ins_pipe(fstoreD_stk_reg); 8432 %} 8433 8434 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{ 8435 match(Set dst (MoveL2D src)); 8436 effect(DEF dst, USE src); 8437 ins_cost(MEMORY_REF_COST); 8438 8439 size(4); 8440 format %{ "STX $src,$dst\t! 
MoveL2D" %} 8441 opcode(Assembler::stx_op3); 8442 ins_encode(simple_form3_mem_reg( dst, src ) ); 8443 ins_pipe(istore_mem_reg); 8444 %} 8445 8446 8447 //----------Arithmetic Conversion Instructions--------------------------------- 8448 // The conversions operations are all Alpha sorted. Please keep it that way! 8449 8450 instruct convD2F_reg(regF dst, regD src) %{ 8451 match(Set dst (ConvD2F src)); 8452 size(4); 8453 format %{ "FDTOS $src,$dst" %} 8454 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); 8455 ins_encode(form3_opf_rs2D_rdF(src, dst)); 8456 ins_pipe(fcvtD2F); 8457 %} 8458 8459 8460 // Convert a double to an int in a float register. 8461 // If the double is a NAN, stuff a zero in instead. 8462 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{ 8463 effect(DEF dst, USE src, KILL fcc0); 8464 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8465 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8466 "FDTOI $src,$dst\t! convert in delay slot\n\t" 8467 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8468 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" 8469 "skip:" %} 8470 ins_encode(form_d2i_helper(src,dst)); 8471 ins_pipe(fcvtD2I); 8472 %} 8473 8474 instruct convD2I_stk(stackSlotI dst, regD src) %{ 8475 match(Set dst (ConvD2I src)); 8476 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8477 expand %{ 8478 regF tmp; 8479 convD2I_helper(tmp, src); 8480 regF_to_stkI(dst, tmp); 8481 %} 8482 %} 8483 8484 instruct convD2I_reg(iRegI dst, regD src) %{ 8485 predicate(UseVIS >= 3); 8486 match(Set dst (ConvD2I src)); 8487 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8488 expand %{ 8489 regF tmp; 8490 convD2I_helper(tmp, src); 8491 MoveF2I_reg_reg(dst, tmp); 8492 %} 8493 %} 8494 8495 8496 // Convert a double to a long in a double register. 8497 // If the double is a NAN, stuff a zero in instead. 
// Convert a double to a long in a double register.
// If the input is a NaN the FXTOD/FSUBd tail forces the result to zero
// (see the "cleared only if nan" note in the format string below).
8498 instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{ 8499 effect(DEF dst, USE src, KILL fcc0); 8500 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8501 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8502 "FDTOX $src,$dst\t! convert in delay slot\n\t" 8503 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8504 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n" 8505 "skip:" %} 8506 ins_encode(form_d2l_helper(src,dst)); 8507 ins_pipe(fcvtD2L); 8508 %} 8509
// ConvD2L with the result spilled to a stack slot (non-VIS3 path).
8510 instruct convD2L_stk(stackSlotL dst, regD src) %{ 8511 match(Set dst (ConvD2L src)); 8512 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8513 expand %{ 8514 regD tmp; 8515 convD2L_helper(tmp, src); 8516 regD_to_stkL(dst, tmp); 8517 %} 8518 %} 8519
// ConvD2L straight into an integer register; VIS3-only register move.
8520 instruct convD2L_reg(iRegL dst, regD src) %{ 8521 predicate(UseVIS >= 3); 8522 match(Set dst (ConvD2L src)); 8523 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8524 expand %{ 8525 regD tmp; 8526 convD2L_helper(tmp, src); 8527 MoveD2L_reg_reg(dst, tmp); 8528 %} 8529 %} 8530 8531
// Float to double widening conversion.
8532 instruct convF2D_reg(regD dst, regF src) %{ 8533 match(Set dst (ConvF2D src)); 8534 format %{ "FSTOD $src,$dst" %} 8535 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf); 8536 ins_encode(form3_opf_rs2F_rdD(src, dst)); 8537 ins_pipe(fcvtF2D); 8538 %} 8539 8540 8541 // Convert a float to an int in a float register. 8542 // If the float is a NAN, stuff a zero in instead. 8543 instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{ 8544 effect(DEF dst, USE src, KILL fcc0); 8545 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8546 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8547 "FSTOI $src,$dst\t! convert in delay slot\n\t" 8548 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8549 "FSUBs $dst,$dst,$dst\t!
cleared only if nan\n" 8550 "skip:" %} 8551 ins_encode(form_f2i_helper(src,dst)); 8552 ins_pipe(fcvtF2I); 8553 %} 8554
// ConvF2I result to a stack slot (non-VIS3 path).
8555 instruct convF2I_stk(stackSlotI dst, regF src) %{ 8556 match(Set dst (ConvF2I src)); 8557 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8558 expand %{ 8559 regF tmp; 8560 convF2I_helper(tmp, src); 8561 regF_to_stkI(dst, tmp); 8562 %} 8563 %} 8564
// ConvF2I result to an integer register; VIS3-only register move.
8565 instruct convF2I_reg(iRegI dst, regF src) %{ 8566 predicate(UseVIS >= 3); 8567 match(Set dst (ConvF2I src)); 8568 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8569 expand %{ 8570 regF tmp; 8571 convF2I_helper(tmp, src); 8572 MoveF2I_reg_reg(dst, tmp); 8573 %} 8574 %} 8575 8576 8577 // Convert a float to a long in a float register. 8578 // If the float is a NAN, stuff a zero in instead. 8579 instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{ 8580 effect(DEF dst, USE src, KILL fcc0); 8581 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t" 8582 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8583 "FSTOX $src,$dst\t! convert in delay slot\n\t" 8584 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t" 8585 "FSUBd $dst,$dst,$dst\t!
cleared only if nan\n" 8586 "skip:" %} 8587 ins_encode(form_f2l_helper(src,dst)); 8588 ins_pipe(fcvtF2L); 8589 %} 8590
// ConvF2L result to a stack slot (non-VIS3 path).
8591 instruct convF2L_stk(stackSlotL dst, regF src) %{ 8592 match(Set dst (ConvF2L src)); 8593 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8594 expand %{ 8595 regD tmp; 8596 convF2L_helper(tmp, src); 8597 regD_to_stkL(dst, tmp); 8598 %} 8599 %} 8600
// ConvF2L result to a long register; VIS3-only register move.
8601 instruct convF2L_reg(iRegL dst, regF src) %{ 8602 predicate(UseVIS >= 3); 8603 match(Set dst (ConvF2L src)); 8604 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8605 expand %{ 8606 regD tmp; 8607 convF2L_helper(tmp, src); 8608 MoveD2L_reg_reg(dst, tmp); 8609 %} 8610 %} 8611 8612
// Int-to-double conversion core: source int bits must already be in a
// float register (tmp); FITOD does the conversion.
8613 instruct convI2D_helper(regD dst, regF tmp) %{ 8614 effect(USE tmp, DEF dst); 8615 format %{ "FITOD $tmp,$dst" %} 8616 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8617 ins_encode(form3_opf_rs2F_rdD(tmp, dst)); 8618 ins_pipe(fcvtI2D); 8619 %} 8620
// ConvI2D from a stack slot: load into a float reg, then FITOD.
8621 instruct convI2D_stk(stackSlotI src, regD dst) %{ 8622 match(Set dst (ConvI2D src)); 8623 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8624 expand %{ 8625 regF tmp; 8626 stkI_to_regF(tmp, src); 8627 convI2D_helper(dst, tmp); 8628 %} 8629 %} 8630
// ConvI2D from an integer register; VIS3-only int->float register move.
8631 instruct convI2D_reg(regD_low dst, iRegI src) %{ 8632 predicate(UseVIS >= 3); 8633 match(Set dst (ConvI2D src)); 8634 expand %{ 8635 regF tmp; 8636 MoveI2F_reg_reg(tmp, src); 8637 convI2D_helper(dst, tmp); 8638 %} 8639 %} 8640
// ConvI2D fused with the feeding LoadI: LDF directly, then FITOD in place.
8641 instruct convI2D_mem(regD_low dst, memory mem) %{ 8642 match(Set dst (ConvI2D (LoadI mem))); 8643 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8644 size(8); 8645 format %{ "LDF $mem,$dst\n\t" 8646 "FITOD $dst,$dst" %} 8647 opcode(Assembler::ldf_op3, Assembler::fitod_opf); 8648 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8649 ins_pipe(floadF_mem); 8650 %} 8651 8652
// Int-to-float conversion core (FITOS); mirrors convI2D_helper above.
8653 instruct convI2F_helper(regF dst, regF tmp) %{ 8654 effect(DEF dst, USE tmp); 8655 format %{ "FITOS $tmp,$dst" %} 8656 opcode(Assembler::fpop1_op3, Assembler::arith_op,
Assembler::fitos_opf); 8657 ins_encode(form3_opf_rs2F_rdF(tmp, dst)); 8658 ins_pipe(fcvtI2F); 8659 %} 8660
// ConvI2F from a stack slot: load into a float reg, then FITOS.
8661 instruct convI2F_stk(regF dst, stackSlotI src) %{ 8662 match(Set dst (ConvI2F src)); 8663 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8664 expand %{ 8665 regF tmp; 8666 stkI_to_regF(tmp,src); 8667 convI2F_helper(dst, tmp); 8668 %} 8669 %} 8670
// ConvI2F from an integer register; VIS3-only int->float register move.
8671 instruct convI2F_reg(regF dst, iRegI src) %{ 8672 predicate(UseVIS >= 3); 8673 match(Set dst (ConvI2F src)); 8674 ins_cost(DEFAULT_COST); 8675 expand %{ 8676 regF tmp; 8677 MoveI2F_reg_reg(tmp, src); 8678 convI2F_helper(dst, tmp); 8679 %} 8680 %} 8681
// ConvI2F fused with the feeding LoadI: LDF directly, then FITOS in place.
8682 instruct convI2F_mem( regF dst, memory mem ) %{ 8683 match(Set dst (ConvI2F (LoadI mem))); 8684 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8685 size(8); 8686 format %{ "LDF $mem,$dst\n\t" 8687 "FITOS $dst,$dst" %} 8688 opcode(Assembler::ldf_op3, Assembler::fitos_opf); 8689 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst)); 8690 ins_pipe(floadF_mem); 8691 %} 8692 8693
// Sign-extend int to long: SRA by zero replicates bit 31 into the high word.
8694 instruct convI2L_reg(iRegL dst, iRegI src) %{ 8695 match(Set dst (ConvI2L src)); 8696 size(4); 8697 format %{ "SRA $src,0,$dst\t! int->long" %} 8698 opcode(Assembler::sra_op3, Assembler::arith_op); 8699 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8700 ins_pipe(ialu_reg_reg); 8701 %} 8702 8703 // Zero-extend convert int to long 8704 instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{ 8705 match(Set dst (AndL (ConvI2L src) mask) ); 8706 size(4); 8707 format %{ "SRL $src,0,$dst\t! zero-extend int to long" %} 8708 opcode(Assembler::srl_op3, Assembler::arith_op); 8709 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8710 ins_pipe(ialu_reg_reg); 8711 %} 8712 8713 // Zero-extend long 8714 instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{ 8715 match(Set dst (AndL src mask) ); 8716 size(4); 8717 format %{ "SRL $src,0,$dst\t!
zero-extend long" %} 8718 opcode(Assembler::srl_op3, Assembler::arith_op); 8719 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8720 ins_pipe(ialu_reg_reg); 8721 %} 8722 8723 8724 //----------- 8725 // Long to Double conversion using V8 opcodes. 8726 // Still useful because cheetah traps and becomes 8727 // amazingly slow for some common numbers. 8728 8729 // Magic constant, 0x43300000 8730 instruct loadConI_x43300000(iRegI dst) %{ 8731 effect(DEF dst); 8732 size(4); 8733 format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %} 8734 ins_encode(SetHi22(0x43300000, dst)); 8735 ins_pipe(ialu_none); 8736 %} 8737 8738 // Magic constant, 0x41f00000 8739 instruct loadConI_x41f00000(iRegI dst) %{ 8740 effect(DEF dst); 8741 size(4); 8742 format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %} 8743 ins_encode(SetHi22(0x41f00000, dst)); 8744 ins_pipe(ialu_none); 8745 %} 8746 8747 // Construct a double from two float halves 8748 instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{ 8749 effect(DEF dst, USE src1, USE src2); 8750 size(8); 8751 format %{ "FMOVS $src1.hi,$dst.hi\n\t" 8752 "FMOVS $src2.lo,$dst.lo" %} 8753 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf); 8754 ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst)); 8755 ins_pipe(faddD_reg_reg); 8756 %} 8757 8758 // Convert integer in high half of a double register (in the lower half of 8759 // the double register file) to double 8760 instruct convI2D_regDHi_regD(regD dst, regD_low src) %{ 8761 effect(DEF dst, USE src); 8762 size(4); 8763 format %{ "FITOD $src,$dst" %} 8764 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8765 ins_encode(form3_opf_rs2D_rdD(src, dst)); 8766 ins_pipe(fcvtLHi2D); 8767 %} 8768 8769 // Add float double precision 8770 instruct addD_regD_regD(regD dst, regD src1, regD src2) %{ 8771 effect(DEF dst, USE src1, USE src2); 8772 size(4); 8773 format %{ "FADDD $src1,$src2,$dst" %} 8774
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 8775 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8776 ins_pipe(faddD_reg_reg); 8777 %} 8778 8779 // Sub float double precision 8780 instruct subD_regD_regD(regD dst, regD src1, regD src2) %{ 8781 effect(DEF dst, USE src1, USE src2); 8782 size(4); 8783 format %{ "FSUBD $src1,$src2,$dst" %} 8784 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 8785 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8786 ins_pipe(faddD_reg_reg); 8787 %} 8788 8789 // Mul float double precision 8790 instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{ 8791 effect(DEF dst, USE src1, USE src2); 8792 size(4); 8793 format %{ "FMULD $src1,$src2,$dst" %} 8794 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 8795 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8796 ins_pipe(fmulD_reg_reg); 8797 %} 8798
// Slow ConvL2D without FXTOD: split the long into halves, bias each with
// the 2^52 / 2^32 magic constants loaded above, then recombine with
// double-precision arithmetic.
8799 instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{ 8800 match(Set dst (ConvL2D src)); 8801 ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6); 8802 8803 expand %{ 8804 regD_low tmpsrc; 8805 iRegI ix43300000; 8806 iRegI ix41f00000; 8807 stackSlotL lx43300000; 8808 stackSlotL lx41f00000; 8809 regD_low dx43300000; 8810 regD dx41f00000; 8811 regD tmp1; 8812 regD_low tmp2; 8813 regD tmp3; 8814 regD tmp4; 8815 8816 stkL_to_regD(tmpsrc, src); 8817 8818 loadConI_x43300000(ix43300000); 8819 loadConI_x41f00000(ix41f00000); 8820 regI_to_stkLHi(lx43300000, ix43300000); 8821 regI_to_stkLHi(lx41f00000, ix41f00000); 8822 stkL_to_regD(dx43300000, lx43300000); 8823 stkL_to_regD(dx41f00000, lx41f00000); 8824 8825 convI2D_regDHi_regD(tmp1, tmpsrc); 8826 regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc); 8827 subD_regD_regD(tmp3, tmp2, dx43300000); 8828 mulD_regD_regD(tmp4, tmp1, dx41f00000); 8829 addD_regD_regD(dst, tmp3, tmp4); 8830 %} 8831 %} 8832 8833 // Long to Double conversion using fast fxtof 8834 instruct convL2D_helper(regD dst, regD tmp) %{ 8835
effect(DEF dst, USE tmp); 8836 size(4); 8837 format %{ "FXTOD $tmp,$dst" %} 8838 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf); 8839 ins_encode(form3_opf_rs2D_rdD(tmp, dst)); 8840 ins_pipe(fcvtL2D); 8841 %} 8842
// ConvL2D from a stack slot; only when FXTOD is fast on this CPU.
8843 instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{ 8844 predicate(VM_Version::has_fast_fxtof()); 8845 match(Set dst (ConvL2D src)); 8846 ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST); 8847 expand %{ 8848 regD tmp; 8849 stkL_to_regD(tmp, src); 8850 convL2D_helper(dst, tmp); 8851 %} 8852 %} 8853
// ConvL2D from a long register; VIS3-only long->double register move.
8854 instruct convL2D_reg(regD dst, iRegL src) %{ 8855 predicate(UseVIS >= 3); 8856 match(Set dst (ConvL2D src)); 8857 expand %{ 8858 regD tmp; 8859 MoveL2D_reg_reg(tmp, src); 8860 convL2D_helper(dst, tmp); 8861 %} 8862 %} 8863 8864 // Long to Float conversion using fast fxtof 8865 instruct convL2F_helper(regF dst, regD tmp) %{ 8866 effect(DEF dst, USE tmp); 8867 size(4); 8868 format %{ "FXTOS $tmp,$dst" %} 8869 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf); 8870 ins_encode(form3_opf_rs2D_rdF(tmp, dst)); 8871 ins_pipe(fcvtL2F); 8872 %} 8873
// ConvL2F from a stack slot (no fast-fxtof predicate needed for FXTOS).
8874 instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{ 8875 match(Set dst (ConvL2F src)); 8876 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8877 expand %{ 8878 regD tmp; 8879 stkL_to_regD(tmp, src); 8880 convL2F_helper(dst, tmp); 8881 %} 8882 %} 8883
// ConvL2F from a long register; VIS3-only long->double register move.
8884 instruct convL2F_reg(regF dst, iRegL src) %{ 8885 predicate(UseVIS >= 3); 8886 match(Set dst (ConvL2F src)); 8887 ins_cost(DEFAULT_COST); 8888 expand %{ 8889 regD tmp; 8890 MoveL2D_reg_reg(tmp, src); 8891 convL2F_helper(dst, tmp); 8892 %} 8893 %} 8894 8895 //----------- 8896
// Long to int: 32-bit build moves the low word; 64-bit build sign-extends
// the low 32 bits in place with an arithmetic shift.
8897 instruct convL2I_reg(iRegI dst, iRegL src) %{ 8898 match(Set dst (ConvL2I src)); 8899 #ifndef _LP64 8900 format %{ "MOV $src.lo,$dst\t! long->int" %} 8901 ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) ); 8902 ins_pipe(ialu_move_reg_I_to_L); 8903 #else 8904 size(4); 8905 format %{ "SRA $src,R_G0,$dst\t!
long->int" %} 8906 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) ); 8907 ins_pipe(ialu_reg); 8908 #endif 8909 %} 8910 8911 // Register Shift Right Immediate 8912 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{ 8913 match(Set dst (ConvL2I (RShiftL src cnt))); 8914 8915 size(4); 8916 format %{ "SRAX $src,$cnt,$dst" %} 8917 opcode(Assembler::srax_op3, Assembler::arith_op); 8918 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) ); 8919 ins_pipe(ialu_reg_imm); 8920 %} 8921 8922 //----------Control Flow Instructions------------------------------------------ 8923 // Compare Instructions 8924 // Compare Integers
// All compares below are SUBcc/ANDcc into %g0: flags only, no data result.
8925 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{ 8926 match(Set icc (CmpI op1 op2)); 8927 effect( DEF icc, USE op1, USE op2 ); 8928 8929 size(4); 8930 format %{ "CMP $op1,$op2" %} 8931 opcode(Assembler::subcc_op3, Assembler::arith_op); 8932 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8933 ins_pipe(ialu_cconly_reg_reg); 8934 %} 8935 8936 instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{ 8937 match(Set icc (CmpU op1 op2)); 8938 8939 size(4); 8940 format %{ "CMP $op1,$op2\t!
unsigned" %} 8941 opcode(Assembler::subcc_op3, Assembler::arith_op); 8942 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8943 ins_pipe(ialu_cconly_reg_reg); 8944 %} 8945 8946 instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{ 8947 match(Set icc (CmpI op1 op2)); 8948 effect( DEF icc, USE op1 ); 8949 8950 size(4); 8951 format %{ "CMP $op1,$op2" %} 8952 opcode(Assembler::subcc_op3, Assembler::arith_op); 8953 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8954 ins_pipe(ialu_cconly_reg_imm); 8955 %} 8956
// BTST: set flags from (op1 & op2) without keeping the AND result.
8957 instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{ 8958 match(Set icc (CmpI (AndI op1 op2) zero)); 8959 8960 size(4); 8961 format %{ "BTST $op2,$op1" %} 8962 opcode(Assembler::andcc_op3, Assembler::arith_op); 8963 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8964 ins_pipe(ialu_cconly_reg_reg_zero); 8965 %} 8966 8967 instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{ 8968 match(Set icc (CmpI (AndI op1 op2) zero)); 8969 8970 size(4); 8971 format %{ "BTST $op2,$op1" %} 8972 opcode(Assembler::andcc_op3, Assembler::arith_op); 8973 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8974 ins_pipe(ialu_cconly_reg_imm_zero); 8975 %} 8976 8977 instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{ 8978 match(Set xcc (CmpL op1 op2)); 8979 effect( DEF xcc, USE op1, USE op2 ); 8980 8981 size(4); 8982 format %{ "CMP $op1,$op2\t\t! long" %} 8983 opcode(Assembler::subcc_op3, Assembler::arith_op); 8984 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8985 ins_pipe(ialu_cconly_reg_reg); 8986 %} 8987 8988 instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{ 8989 match(Set xcc (CmpL op1 con)); 8990 effect( DEF xcc, USE op1, USE con ); 8991 8992 size(4); 8993 format %{ "CMP $op1,$con\t\t!
long" %} 8994 opcode(Assembler::subcc_op3, Assembler::arith_op); 8995 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 8996 ins_pipe(ialu_cconly_reg_reg); 8997 %} 8998 8999 instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{ 9000 match(Set xcc (CmpL (AndL op1 op2) zero)); 9001 effect( DEF xcc, USE op1, USE op2 ); 9002 9003 size(4); 9004 format %{ "BTST $op1,$op2\t\t! long" %} 9005 opcode(Assembler::andcc_op3, Assembler::arith_op); 9006 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9007 ins_pipe(ialu_cconly_reg_reg); 9008 %} 9009 9010 // useful for checking the alignment of a pointer: 9011 instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{ 9012 match(Set xcc (CmpL (AndL op1 con) zero)); 9013 effect( DEF xcc, USE op1, USE con ); 9014 9015 size(4); 9016 format %{ "BTST $op1,$con\t\t! long" %} 9017 opcode(Assembler::andcc_op3, Assembler::arith_op); 9018 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 9019 ins_pipe(ialu_cconly_reg_reg); 9020 %} 9021 9022 instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU12 op2 ) %{ 9023 match(Set icc (CmpU op1 op2)); 9024 9025 size(4); 9026 format %{ "CMP $op1,$op2\t! unsigned" %} 9027 opcode(Assembler::subcc_op3, Assembler::arith_op); 9028 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9029 ins_pipe(ialu_cconly_reg_imm); 9030 %} 9031 9032 // Compare Pointers 9033 instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{ 9034 match(Set pcc (CmpP op1 op2)); 9035 9036 size(4); 9037 format %{ "CMP $op1,$op2\t! ptr" %} 9038 opcode(Assembler::subcc_op3, Assembler::arith_op); 9039 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9040 ins_pipe(ialu_cconly_reg_reg); 9041 %} 9042 9043 instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{ 9044 match(Set pcc (CmpP op1 op2)); 9045 9046 size(4); 9047 format %{ "CMP $op1,$op2\t!
ptr" %} 9048 opcode(Assembler::subcc_op3, Assembler::arith_op); 9049 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9050 ins_pipe(ialu_cconly_reg_imm); 9051 %} 9052 9053 // Compare Narrow oops 9054 instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{ 9055 match(Set icc (CmpN op1 op2)); 9056 9057 size(4); 9058 format %{ "CMP $op1,$op2\t! compressed ptr" %} 9059 opcode(Assembler::subcc_op3, Assembler::arith_op); 9060 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9061 ins_pipe(ialu_cconly_reg_reg); 9062 %} 9063 9064 instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{ 9065 match(Set icc (CmpN op1 op2)); 9066 9067 size(4); 9068 format %{ "CMP $op1,$op2\t! compressed ptr" %} 9069 opcode(Assembler::subcc_op3, Assembler::arith_op); 9070 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9071 ins_pipe(ialu_cconly_reg_imm); 9072 %} 9073 9074 //----------Max and Min-------------------------------------------------------- 9075 // Min Instructions 9076 // Conditional move for min 9077 instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{ 9078 effect( USE_DEF op2, USE op1, USE icc ); 9079 9080 size(4); 9081 format %{ "MOVlt icc,$op1,$op2\t! min" %} 9082 opcode(Assembler::less); 9083 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 9084 ins_pipe(ialu_reg_flags); 9085 %} 9086 9087 // Min Register with Register. 9088 instruct minI_eReg(iRegI op1, iRegI op2) %{ 9089 match(Set op2 (MinI op1 op2)); 9090 ins_cost(DEFAULT_COST*2); 9091 expand %{ 9092 flagsReg icc; 9093 compI_iReg(icc,op1,op2); 9094 cmovI_reg_lt(op2,op1,icc); 9095 %} 9096 %} 9097 9098 // Max Instructions 9099 // Conditional move for max 9100 instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{ 9101 effect( USE_DEF op2, USE op1, USE icc );
// FIX: declare the explicit 4-byte size, matching the twin cmovI_reg_lt
// above — both emit a single MOVcc via enc_cmov_reg_minmax.
size(4);
9102 format %{ "MOVgt icc,$op1,$op2\t!
max" %} 9103 opcode(Assembler::greater); 9104 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 9105 ins_pipe(ialu_reg_flags); 9106 %} 9107 9108 // Max Register with Register 9109 instruct maxI_eReg(iRegI op1, iRegI op2) %{ 9110 match(Set op2 (MaxI op1 op2)); 9111 ins_cost(DEFAULT_COST*2); 9112 expand %{ 9113 flagsReg icc; 9114 compI_iReg(icc,op1,op2); 9115 cmovI_reg_gt(op2,op1,icc); 9116 %} 9117 %} 9118 9119 9120 //----------Float Compares---------------------------------------------------- 9121 // Compare floating, generate condition code 9122 instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{ 9123 match(Set fcc (CmpF src1 src2)); 9124 9125 size(4); 9126 format %{ "FCMPs $fcc,$src1,$src2" %} 9127 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf); 9128 ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) ); 9129 ins_pipe(faddF_fcc_reg_reg_zero); 9130 %} 9131 9132 instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{ 9133 match(Set fcc (CmpD src1 src2)); 9134 9135 size(4); 9136 format %{ "FCMPd $fcc,$src1,$src2" %} 9137 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf); 9138 ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) ); 9139 ins_pipe(faddD_fcc_reg_reg_zero); 9140 %} 9141 9142 9143 // Compare floating, generate -1,0,1 9144 instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{ 9145 match(Set dst (CmpF3 src1 src2)); 9146 effect(KILL fcc0); 9147 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9148 format %{ "fcmpl $dst,$src1,$src2" %} 9149 // Primary = float 9150 opcode( true ); 9151 ins_encode( floating_cmp( dst, src1, src2 ) ); 9152 ins_pipe( floating_cmp ); 9153 %} 9154 9155 instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{ 9156 match(Set dst (CmpD3 src1 src2)); 9157 effect(KILL fcc0); 9158 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9159 format %{ "dcmpl $dst,$src1,$src2" %} 9160 // Primary = double (not float) 9161 opcode( false ); 9162 ins_encode( floating_cmp( dst, src1, src2
) ); 9163 ins_pipe( floating_cmp ); 9164 %} 9165 9166 //----------Branches--------------------------------------------------------- 9167 // Jump 9168 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above) 9169 instruct jumpXtnd(iRegX switch_val, o7RegI table) %{ 9170 match(Jump switch_val); 9171 effect(TEMP table); 9172 9173 ins_cost(350); 9174 9175 format %{ "ADD $constanttablebase, $constantoffset, O7\n\t" 9176 "LD [O7 + $switch_val], O7\n\t" 9177 "JUMP O7" %} 9178 ins_encode %{ 9179 // Calculate table address into a register. 9180 Register table_reg; 9181 Register label_reg = O7; 9182 // If we are calculating the size of this instruction don't trust 9183 // zero offsets because they might change when 9184 // MachConstantBaseNode decides to optimize the constant table 9185 // base. 9186 if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) { 9187 table_reg = $constanttablebase; 9188 } else { 9189 table_reg = O7; 9190 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7); 9191 __ add($constanttablebase, con_offset, table_reg); 9192 } 9193 9194 // Jump to base address + switch value 9195 __ ld_ptr(table_reg, $switch_val$$Register, label_reg); 9196 __ jmp(label_reg, G0); 9197 __ delayed()->nop(); 9198 %} 9199 ins_pipe(ialu_reg_reg); 9200 %} 9201 9202 // Direct Branch. Use V8 version with longer range. 9203 instruct branch(label labl) %{ 9204 match(Goto); 9205 effect(USE labl); 9206 9207 size(8); 9208 ins_cost(BRANCH_COST); 9209 format %{ "BA $labl" %} 9210 ins_encode %{ 9211 Label* L = $labl$$label; 9212 __ ba(*L); 9213 __ delayed()->nop(); 9214 %} 9215 ins_avoid_back_to_back(AVOID_BEFORE); 9216 ins_pipe(br); 9217 %} 9218 9219 // Direct Branch, short with no delay slot 9220 instruct branch_short(label labl) %{ 9221 match(Goto); 9222 predicate(UseCBCond); 9223 effect(USE labl); 9224 9225 size(4); 9226 ins_cost(BRANCH_COST); 9227 format %{ "BA $labl\t!
short branch" %} 9228 ins_encode %{ 9229 Label* L = $labl$$label; 9230 assert(__ use_cbcond(*L), "back to back cbcond"); 9231 __ ba_short(*L); 9232 %} 9233 ins_short_branch(1); 9234 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9235 ins_pipe(cbcond_reg_imm); 9236 %} 9237 9238 // Conditional Direct Branch 9239 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{ 9240 match(If cmp icc); 9241 effect(USE labl); 9242 9243 size(8); 9244 ins_cost(BRANCH_COST); 9245 format %{ "BP$cmp $icc,$labl" %} 9246 // Prim = bits 24-22, Secnd = bits 31-30 9247 ins_encode( enc_bp( labl, cmp, icc ) ); 9248 ins_avoid_back_to_back(AVOID_BEFORE); 9249 ins_pipe(br_cc); 9250 %} 9251 9252 instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9253 match(If cmp icc); 9254 effect(USE labl); 9255 9256
// FIX: declare the explicit 8-byte size (BPcc + delay-slot nop), matching
// branchCon/branchConP/branchConF which share the same enc_bp emission.
size(8);
ins_cost(BRANCH_COST); 9257 format %{ "BP$cmp $icc,$labl" %} 9258 // Prim = bits 24-22, Secnd = bits 31-30 9259 ins_encode( enc_bp( labl, cmp, icc ) ); 9260 ins_avoid_back_to_back(AVOID_BEFORE); 9261 ins_pipe(br_cc); 9262 %} 9263 9264 instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{ 9265 match(If cmp pcc); 9266 effect(USE labl); 9267 9268 size(8); 9269 ins_cost(BRANCH_COST); 9270 format %{ "BP$cmp $pcc,$labl" %} 9271 ins_encode %{ 9272 Label* L = $labl$$label; 9273 Assembler::Predict predict_taken = 9274 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9275 9276 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9277 __ delayed()->nop(); 9278 %} 9279 ins_avoid_back_to_back(AVOID_BEFORE); 9280 ins_pipe(br_cc); 9281 %} 9282 9283 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{ 9284 match(If cmp fcc); 9285 effect(USE labl); 9286 9287 size(8); 9288 ins_cost(BRANCH_COST); 9289 format %{ "FBP$cmp $fcc,$labl" %} 9290 ins_encode %{ 9291 Label* L = $labl$$label; 9292 Assembler::Predict predict_taken = 9293 cbuf.is_backward_branch(*L) ?
Assembler::pt : Assembler::pn; 9294 9295 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L); 9296 __ delayed()->nop(); 9297 %} 9298 ins_avoid_back_to_back(AVOID_BEFORE); 9299 ins_pipe(br_fcc); 9300 %} 9301 9302 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{ 9303 match(CountedLoopEnd cmp icc); 9304 effect(USE labl); 9305 9306 size(8); 9307 ins_cost(BRANCH_COST); 9308 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9309 // Prim = bits 24-22, Secnd = bits 31-30 9310 ins_encode( enc_bp( labl, cmp, icc ) ); 9311 ins_avoid_back_to_back(AVOID_BEFORE); 9312 ins_pipe(br_cc); 9313 %} 9314 9315 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9316 match(CountedLoopEnd cmp icc); 9317 effect(USE labl); 9318 9319 size(8); 9320 ins_cost(BRANCH_COST); 9321 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9322 // Prim = bits 24-22, Secnd = bits 31-30 9323 ins_encode( enc_bp( labl, cmp, icc ) ); 9324 ins_avoid_back_to_back(AVOID_BEFORE); 9325 ins_pipe(br_cc); 9326 %} 9327 9328 // Compare and branch instructions
// Each fused form below emits CMP + BPcc + delay-slot nop (12 bytes) and
// predicts taken for backward branches, not-taken for forward ones.
9329 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9330 match(If cmp (CmpI op1 op2)); 9331 effect(USE labl, KILL icc); 9332 9333 size(12); 9334 ins_cost(BRANCH_COST); 9335 format %{ "CMP $op1,$op2\t! int\n\t" 9336 "BP$cmp $labl" %} 9337 ins_encode %{ 9338 Label* L = $labl$$label; 9339 Assembler::Predict predict_taken = 9340 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9341 __ cmp($op1$$Register, $op2$$Register); 9342 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9343 __ delayed()->nop(); 9344 %} 9345 ins_pipe(cmp_br_reg_reg); 9346 %} 9347 9348 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9349 match(If cmp (CmpI op1 op2)); 9350 effect(USE labl, KILL icc); 9351 9352 size(12); 9353 ins_cost(BRANCH_COST); 9354 format %{ "CMP $op1,$op2\t!
int\n\t" 9355 "BP$cmp $labl" %} 9356 ins_encode %{ 9357 Label* L = $labl$$label; 9358 Assembler::Predict predict_taken = 9359 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9360 __ cmp($op1$$Register, $op2$$constant); 9361 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9362 __ delayed()->nop(); 9363 %} 9364 ins_pipe(cmp_br_reg_imm); 9365 %} 9366 9367 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9368 match(If cmp (CmpU op1 op2)); 9369 effect(USE labl, KILL icc); 9370 9371 size(12); 9372 ins_cost(BRANCH_COST); 9373 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9374 "BP$cmp $labl" %} 9375 ins_encode %{ 9376 Label* L = $labl$$label; 9377 Assembler::Predict predict_taken = 9378 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9379 __ cmp($op1$$Register, $op2$$Register); 9380 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9381 __ delayed()->nop(); 9382 %} 9383 ins_pipe(cmp_br_reg_reg); 9384 %} 9385 9386 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9387 match(If cmp (CmpU op1 op2)); 9388 effect(USE labl, KILL icc); 9389 9390 size(12); 9391 ins_cost(BRANCH_COST); 9392 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9393 "BP$cmp $labl" %} 9394 ins_encode %{ 9395 Label* L = $labl$$label; 9396 Assembler::Predict predict_taken = 9397 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9398 __ cmp($op1$$Register, $op2$$constant); 9399 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9400 __ delayed()->nop(); 9401 %} 9402 ins_pipe(cmp_br_reg_imm); 9403 %} 9404 9405 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9406 match(If cmp (CmpL op1 op2)); 9407 effect(USE labl, KILL xcc); 9408 9409 size(12); 9410 ins_cost(BRANCH_COST); 9411 format %{ "CMP $op1,$op2\t!
long\n\t" 9412 "BP$cmp $labl" %} 9413 ins_encode %{ 9414 Label* L = $labl$$label; 9415 Assembler::Predict predict_taken = 9416 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9417 __ cmp($op1$$Register, $op2$$Register); 9418 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9419 __ delayed()->nop(); 9420 %} 9421 ins_pipe(cmp_br_reg_reg); 9422 %} 9423 9424 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9425 match(If cmp (CmpL op1 op2)); 9426 effect(USE labl, KILL xcc); 9427 9428 size(12); 9429 ins_cost(BRANCH_COST); 9430 format %{ "CMP $op1,$op2\t! long\n\t" 9431 "BP$cmp $labl" %} 9432 ins_encode %{ 9433 Label* L = $labl$$label; 9434 Assembler::Predict predict_taken = 9435 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9436 __ cmp($op1$$Register, $op2$$constant); 9437 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9438 __ delayed()->nop(); 9439 %} 9440 ins_pipe(cmp_br_reg_imm); 9441 %} 9442 9443 // Compare Pointers and branch 9444 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9445 match(If cmp (CmpP op1 op2)); 9446 effect(USE labl, KILL pcc); 9447 9448 size(12); 9449 ins_cost(BRANCH_COST); 9450 format %{ "CMP $op1,$op2\t! ptr\n\t" 9451 "B$cmp $labl" %} 9452 ins_encode %{ 9453 Label* L = $labl$$label; 9454 Assembler::Predict predict_taken = 9455 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9456 __ cmp($op1$$Register, $op2$$Register); 9457 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9458 __ delayed()->nop(); 9459 %} 9460 ins_pipe(cmp_br_reg_reg); 9461 %} 9462 9463 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9464 match(If cmp (CmpP op1 null)); 9465 effect(USE labl, KILL pcc); 9466 9467 size(12); 9468 ins_cost(BRANCH_COST); 9469 format %{ "CMP $op1,0\t!
ptr\n\t" 9470 "B$cmp $labl" %} 9471 ins_encode %{ 9472 Label* L = $labl$$label; 9473 Assembler::Predict predict_taken = 9474 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9475 __ cmp($op1$$Register, G0); 9476 // bpr() is not used here since it has shorter distance. 9477 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9478 __ delayed()->nop(); 9479 %} 9480 ins_pipe(cmp_br_reg_reg); 9481 %} 9482 9483 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9484 match(If cmp (CmpN op1 op2)); 9485 effect(USE labl, KILL icc); 9486 9487 size(12); 9488 ins_cost(BRANCH_COST); 9489 format %{ "CMP $op1,$op2\t! compressed ptr\n\t" 9490 "BP$cmp $labl" %} 9491 ins_encode %{ 9492 Label* L = $labl$$label; 9493 Assembler::Predict predict_taken = 9494 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9495 __ cmp($op1$$Register, $op2$$Register); 9496 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9497 __ delayed()->nop(); 9498 %} 9499 ins_pipe(cmp_br_reg_reg); 9500 %} 9501 9502 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9503 match(If cmp (CmpN op1 null)); 9504 effect(USE labl, KILL icc); 9505 9506 size(12); 9507 ins_cost(BRANCH_COST); 9508 format %{ "CMP $op1,0\t! compressed ptr\n\t" 9509 "BP$cmp $labl" %} 9510 ins_encode %{ 9511 Label* L = $labl$$label; 9512 Assembler::Predict predict_taken = 9513 cbuf.is_backward_branch(*L) ?
Assembler::pt : Assembler::pn; 9514 __ cmp($op1$$Register, G0); 9515 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9516 __ delayed()->nop(); 9517 %} 9518 ins_pipe(cmp_br_reg_reg); 9519 %} 9520 9521 // Loop back branch 9522 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9523 match(CountedLoopEnd cmp (CmpI op1 op2)); 9524 effect(USE labl, KILL icc); 9525 9526 size(12); 9527 ins_cost(BRANCH_COST); 9528 format %{ "CMP $op1,$op2\t! int\n\t" 9529 "BP$cmp $labl\t! Loop end" %} 9530 ins_encode %{ 9531 Label* L = $labl$$label; 9532 Assembler::Predict predict_taken = 9533 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9534 __ cmp($op1$$Register, $op2$$Register); 9535 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9536 __ delayed()->nop(); 9537 %} 9538 ins_pipe(cmp_br_reg_reg); 9539 %} 9540 9541 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9542 match(CountedLoopEnd cmp (CmpI op1 op2)); 9543 effect(USE labl, KILL icc); 9544 9545 size(12); 9546 ins_cost(BRANCH_COST); 9547 format %{ "CMP $op1,$op2\t! int\n\t" 9548 "BP$cmp $labl\t! Loop end" %} 9549 ins_encode %{ 9550 Label* L = $labl$$label; 9551 Assembler::Predict predict_taken = 9552 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9553 __ cmp($op1$$Register, $op2$$constant); 9554 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9555 __ delayed()->nop(); 9556 %} 9557 ins_pipe(cmp_br_reg_imm); 9558 %} 9559 9560 // Short compare and branch instructions
// Single-instruction CBCOND fusions; only selected when UseCBCond is on
// and the assembler confirms the target is in cbcond range.
9561 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9562 match(If cmp (CmpI op1 op2)); 9563 predicate(UseCBCond); 9564 effect(USE labl, KILL icc); 9565 9566 size(4); 9567 ins_cost(BRANCH_COST); 9568 format %{ "CWB$cmp $op1,$op2,$labl\t!
int" %} 9569 ins_encode %{ 9570 Label* L = $labl$$label; 9571 assert(__ use_cbcond(*L), "back to back cbcond"); 9572 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9573 %} 9574 ins_short_branch(1); 9575 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9576 ins_pipe(cbcond_reg_reg); 9577 %} 9578 9579 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9580 match(If cmp (CmpI op1 op2)); 9581 predicate(UseCBCond); 9582 effect(USE labl, KILL icc); 9583 9584 size(4); 9585 ins_cost(BRANCH_COST); 9586 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %} 9587 ins_encode %{ 9588 Label* L = $labl$$label; 9589 assert(__ use_cbcond(*L), "back to back cbcond"); 9590 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9591 %} 9592 ins_short_branch(1); 9593 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9594 ins_pipe(cbcond_reg_imm); 9595 %} 9596 9597 instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9598 match(If cmp (CmpU op1 op2)); 9599 predicate(UseCBCond); 9600 effect(USE labl, KILL icc); 9601 9602 size(4); 9603 ins_cost(BRANCH_COST); 9604 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %} 9605 ins_encode %{ 9606 Label* L = $labl$$label; 9607 assert(__ use_cbcond(*L), "back to back cbcond"); 9608 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9609 %} 9610 ins_short_branch(1); 9611 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9612 ins_pipe(cbcond_reg_reg); 9613 %} 9614 9615 instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9616 match(If cmp (CmpU op1 op2)); 9617 predicate(UseCBCond); 9618 effect(USE labl, KILL icc); 9619 9620 size(4); 9621 ins_cost(BRANCH_COST); 9622 format %{ "CWB$cmp $op1,$op2,$labl\t!
unsigned" %} 9623 ins_encode %{ 9624 Label* L = $labl$$label; 9625 assert(__ use_cbcond(*L), "back to back cbcond"); 9626 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9627 %} 9628 ins_short_branch(1); 9629 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9630 ins_pipe(cbcond_reg_imm); 9631 %} 9632
// Long (xcc) cbcond fusions.
9633 instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9634 match(If cmp (CmpL op1 op2)); 9635 predicate(UseCBCond); 9636 effect(USE labl, KILL xcc); 9637 9638 size(4); 9639 ins_cost(BRANCH_COST); 9640 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %} 9641 ins_encode %{ 9642 Label* L = $labl$$label; 9643 assert(__ use_cbcond(*L), "back to back cbcond"); 9644 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L); 9645 %} 9646 ins_short_branch(1); 9647 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9648 ins_pipe(cbcond_reg_reg); 9649 %} 9650 9651 instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9652 match(If cmp (CmpL op1 op2)); 9653 predicate(UseCBCond); 9654 effect(USE labl, KILL xcc); 9655 9656 size(4); 9657 ins_cost(BRANCH_COST); 9658 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %} 9659 ins_encode %{ 9660 Label* L = $labl$$label; 9661 assert(__ use_cbcond(*L), "back to back cbcond"); 9662 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L); 9663 %} 9664 ins_short_branch(1); 9665 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9666 ins_pipe(cbcond_reg_imm); 9667 %} 9668 9669 // Compare Pointers and branch 9670 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9671 match(If cmp (CmpP op1 op2)); 9672 predicate(UseCBCond); 9673 effect(USE labl, KILL pcc); 9674 9675 size(4); 9676 ins_cost(BRANCH_COST); 9677 #ifdef _LP64 9678 format %{ "CXB$cmp $op1,$op2,$labl\t!
ptr" %} 9679 #else 9680 format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %} 9681 #endif 9682 ins_encode %{ 9683 Label* L = $labl$$label; 9684 assert(__ use_cbcond(*L), "back to back cbcond"); 9685 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L); 9686 %} 9687 ins_short_branch(1); 9688 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9689 ins_pipe(cbcond_reg_reg); 9690 %} 9691 9692 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9693 match(If cmp (CmpP op1 null)); 9694 predicate(UseCBCond); 9695 effect(USE labl, KILL pcc); 9696 9697 size(4); 9698 ins_cost(BRANCH_COST); 9699 #ifdef _LP64 9700 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %} 9701 #else 9702 format %{ "CWB$cmp $op1,0,$labl\t! ptr" %} 9703 #endif 9704 ins_encode %{ 9705 Label* L = $labl$$label; 9706 assert(__ use_cbcond(*L), "back to back cbcond"); 9707 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L); 9708 %} 9709 ins_short_branch(1); 9710 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9711 ins_pipe(cbcond_reg_reg); 9712 %} 9713
// NOTE(review): this instruct is truncated at the end of the visible chunk;
// its body continues past this point in the full file.
9714 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9715 match(If cmp (CmpN op1 op2)); 9716 predicate(UseCBCond); 9717 effect(USE labl, KILL icc); 9718 9719 size(4); 9720 ins_cost(BRANCH_COST); 9721 format %{ "CWB$cmp $op1,$op2,$labl\t!
compressed ptr" %} 9722 ins_encode %{ 9723 Label* L = $labl$$label; 9724 assert(__ use_cbcond(*L), "back to back cbcond"); 9725 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9726 %} 9727 ins_short_branch(1); 9728 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9729 ins_pipe(cbcond_reg_reg); 9730 %} 9731 9732 instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9733 match(If cmp (CmpN op1 null)); 9734 predicate(UseCBCond); 9735 effect(USE labl, KILL icc); 9736 9737 size(4); 9738 ins_cost(BRANCH_COST); 9739 format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %} 9740 ins_encode %{ 9741 Label* L = $labl$$label; 9742 assert(__ use_cbcond(*L), "back to back cbcond"); 9743 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L); 9744 %} 9745 ins_short_branch(1); 9746 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9747 ins_pipe(cbcond_reg_reg); 9748 %} 9749 9750 // Loop back branch 9751 instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9752 match(CountedLoopEnd cmp (CmpI op1 op2)); 9753 predicate(UseCBCond); 9754 effect(USE labl, KILL icc); 9755 9756 size(4); 9757 ins_cost(BRANCH_COST); 9758 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %} 9759 ins_encode %{ 9760 Label* L = $labl$$label; 9761 assert(__ use_cbcond(*L), "back to back cbcond"); 9762 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9763 %} 9764 ins_short_branch(1); 9765 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9766 ins_pipe(cbcond_reg_reg); 9767 %} 9768 9769 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9770 match(CountedLoopEnd cmp (CmpI op1 op2)); 9771 predicate(UseCBCond); 9772 effect(USE labl, KILL icc); 9773 9774 size(4); 9775 ins_cost(BRANCH_COST); 9776 format %{ "CWB$cmp $op1,$op2,$labl\t! 
Loop end" %} 9777 ins_encode %{ 9778 Label* L = $labl$$label; 9779 assert(__ use_cbcond(*L), "back to back cbcond"); 9780 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9781 %} 9782 ins_short_branch(1); 9783 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9784 ins_pipe(cbcond_reg_imm); 9785 %} 9786 9787 // Branch-on-register tests all 64 bits. We assume that values 9788 // in 64-bit registers always remains zero or sign extended 9789 // unless our code munges the high bits. Interrupts can chop 9790 // the high order bits to zero or sign at any time. 9791 instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{ 9792 match(If cmp (CmpI op1 zero)); 9793 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9794 effect(USE labl); 9795 9796 size(8); 9797 ins_cost(BRANCH_COST); 9798 format %{ "BR$cmp $op1,$labl" %} 9799 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9800 ins_avoid_back_to_back(AVOID_BEFORE); 9801 ins_pipe(br_reg); 9802 %} 9803 9804 instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{ 9805 match(If cmp (CmpP op1 null)); 9806 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9807 effect(USE labl); 9808 9809 size(8); 9810 ins_cost(BRANCH_COST); 9811 format %{ "BR$cmp $op1,$labl" %} 9812 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9813 ins_avoid_back_to_back(AVOID_BEFORE); 9814 ins_pipe(br_reg); 9815 %} 9816 9817 instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{ 9818 match(If cmp (CmpL op1 zero)); 9819 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9820 effect(USE labl); 9821 9822 size(8); 9823 ins_cost(BRANCH_COST); 9824 format %{ "BR$cmp $op1,$labl" %} 9825 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9826 ins_avoid_back_to_back(AVOID_BEFORE); 9827 ins_pipe(br_reg); 9828 %} 9829 9830 9831 // ============================================================================ 9832 // Long Compare 9833 // 
// Currently we hold longs in 2 registers. Comparing such values efficiently
// is tricky. The flavor of compare used depends on whether we are testing
// for LT, LE, or EQ. For a simple LT test we can check just the sign bit.
// The GE test is the negated LT test. The LE test can be had by commuting
// the operands (yielding a GE test) and then negating; negate again for the
// GT test. The EQ test is done by ORcc'ing the high and low halves, and the
// NE test is negated from that.

// Due to a shortcoming in the ADLC, it mixes up expressions like:
// (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the
// difference between 'Y' and '0L'. The tree-matches for the CmpI sections
// are collapsed internally in the ADLC's dfa-gen code. The match for
// (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
// foo match ends up with the wrong leaf. One fix is to not match both
// reg-reg and reg-zero forms of long-compare. This is unfortunate because
// both forms beat the trinary form of long-compare and both are very useful
// on Intel which has so few registers.

// Branch on the condition codes in xcc (set by an earlier long compare).
// Emits BPcc plus a nop in the delay slot (size 8).
instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
  match(If cmp xcc);
  effect(USE labl);

  size(8);
  ins_cost(BRANCH_COST);
  format %{ "BP$cmp $xcc,$labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    // Static prediction hint: a backward branch (loop) is predicted taken,
    // a forward branch predicted not taken.
    Assembler::Predict predict_taken =
      cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

    __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
    __ delayed()->nop();  // fill the branch delay slot
  %}
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(br_cc);
%}

// Manifest a CmpL3 result in an integer register. Very painful.
// This is the test to avoid.
// Produces -1 / 0 / +1 in $dst from a three-way long compare (see the
// BLT/BGT/CLR sequence in the format below); clobbers the condition codes.
instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
  match(Set dst (CmpL3 src1 src2) );
  effect( KILL ccr );
  ins_cost(6*DEFAULT_COST);
  size(24);
  format %{ "CMP $src1,$src2\t\t! long\n"
            "\tBLT,a,pn done\n"
            "\tMOV -1,$dst\t! delay slot\n"
            "\tBGT,a,pn done\n"
            "\tMOV 1,$dst\t! delay slot\n"
            "\tCLR $dst\n"
            "done:" %}
  ins_encode( cmpl_flag(src1,src2,dst) );
  ins_pipe(cmpL_reg);
%}

// Conditional move
// The following cmov* rules all select on the condition codes in xcc
// (set by a long compare); they differ only in the type moved and in
// whether the source is a register or an immediate.

// Conditional move of a long register on xcc.
instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

// Conditional move of long zero (immL0) on xcc.
instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
  match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

// Conditional move of an int register on xcc.
instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

// Conditional move of an 11-bit int immediate on xcc.
instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
  match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

// Conditional move of a compressed-pointer register on xcc.
instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

// Conditional move of a pointer register on xcc.
instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_reg);
%}

// Conditional move of null (immP0) on xcc.
instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
  match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
  ins_cost(140);
  format %{ "MOV$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(ialu_imm);
%}

// Conditional move of a single float register on xcc (FMOVS).
instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x101);
  format %{ "FMOVS$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

// Conditional move of a double float register on xcc (FMOVD).
instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
  ins_cost(150);
  opcode(0x102);
  format %{ "FMOVD$cmp $xcc,$src,$dst" %}
  ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
  ins_pipe(int_conditional_float_move);
%}

// ============================================================================
// Safepoint Instruction
// Emits a single load from the poll register into G0, tagged with a
// poll_type relocation so the runtime can find it.
instruct safePoint_poll(iRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  size(4);
#ifdef _LP64
  format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
#else
  format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
#endif
  ins_encode %{
    __ relocate(relocInfo::poll_type);
    __ ld_ptr($poll$$Register, 0, G0);
  %}
  ins_pipe(loadPollP);
%}

// ============================================================================
// Call Instructions
// Call Java Static Instruction
instruct CallStaticJavaDirect( method meth ) %{
  match(CallStaticJava);
  // Plain static call only; the method-handle flavor is handled below.
  predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth);

  size(8);
  ins_cost(CALL_COST);
  format %{ "CALL,static ; NOP ==> " %}
  ins_encode( Java_Static_Call( meth ), call_epilog );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}

// Call Java Static Instruction (method handle version)
// Preserves/restores SP around the call via l7_mh_SP_save.
instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  effect(USE meth, KILL l7_mh_SP_save);

  size(16);
  ins_cost(CALL_COST);
  format %{ "CALL,static/MethodHandle" %}
  ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
  ins_pipe(simple_call);
%}

// Call Java Dynamic Instruction
instruct CallDynamicJavaDirect( method meth ) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(CALL_COST);
  format %{ "SET (empty),R_G5\n\t"
            "CALL,dynamic ; NOP ==> " %}
  ins_encode( Java_Dynamic_Call( meth ), call_epilog );
  ins_pipe(call);
%}

// Call Runtime Instruction
instruct CallRuntimeDirect(method meth, l7RegP l7) %{
  match(CallRuntime);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog, adjust_long_from_native_call );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}

// Call runtime without safepoint - same as CallRuntime
instruct CallLeafDirect(method meth, l7RegP l7) %{
  match(CallLeaf);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}

// Call
runtime without safepoint - same as CallLeaf
instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
  match(CallLeafNoFP);
  effect(USE meth, KILL l7);
  ins_cost(CALL_COST);
  format %{ "CALL,runtime leaf nofp" %}
  ins_encode( Java_To_Runtime( meth ),
              call_epilog,
              adjust_long_from_native_call );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(simple_call);
%}

// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
  match(TailCall jump_target method_oop );

  ins_cost(CALL_COST);
  format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
  ins_encode(form_jmpl(jump_target));
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(tail_call);
%}


// Return Instruction
// Emits nothing: the method epilogue already performed the actual return.
instruct Ret() %{
  match(Return);

  // The epilogue node did the ret already.
  size(0);
  format %{ "! return" %}
  ins_encode();
  ins_pipe(empty);
%}


// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(CALL_COST);
  format %{ "! discard R_O7\n\t"
            "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
  ins_encode(form_jmpl_set_exception_pc(jump_target));
  // opcode(Assembler::jmpl_op3, Assembler::arith_op);
  // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
  // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(tail_call);
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( o0RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  size(0);
  // use the following format syntax
  format %{ "! exception oop is in R_O0; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}


// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);
  ins_cost(CALL_COST);

  // use the following format syntax
  format %{ "Jmp rethrow_stub" %}
  ins_encode(enc_rethrow);
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(tail_call);
%}


// Die now
// Emits a single ILLTRAP; reaching this instruction is a fatal error.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(CALL_COST);

  size(4);
  // Use the following format syntax
  format %{ "ILLTRAP ; ShouldNotReachHere" %}
  ins_encode( form2_illtrap() );
  ins_pipe(tail_call);
%}

// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()).
Return
// not zero for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
  match(Set index (PartialSubtypeCheck sub super));
  effect( KILL pcc, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(partial_subtype_check_pipe);
%}

// Variant matched when the PartialSubtypeCheck result is compared against
// zero: the condition codes set by the stub are the result.
instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
  match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
  effect( KILL idx, KILL o7 );
  ins_cost(DEFAULT_COST*10);
  format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
  ins_encode( enc_PartialSubtypeCheck() );
  ins_avoid_back_to_back(AVOID_BEFORE);
  ins_pipe(partial_subtype_check_pipe);
%}


// ============================================================================
// inlined locking and unlocking

// Fast-path monitor enter: result is left in the pcc condition codes;
// kills box, scratch, and scratch2 (see format string).
instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastLock object box));

  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Lock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}


// Fast-path monitor exit: result is left in the pcc condition codes;
// kills box, scratch, and scratch2 (see format string).
instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
  match(Set pcc (FastUnlock object box));
  effect(TEMP scratch2, USE_KILL box, KILL scratch);
  ins_cost(100);

  format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
  ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
  ins_pipe(long_memory_op);
%}

// The encodings are generic.
// Generic doubleword-at-a-time clearing loop, used when block zeroing
// (BIS) is not selected for this node.
instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
  predicate(!use_block_zeroing(n->in(2)) );
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, KILL ccr);
  ins_cost(300);
  format %{ "MOV $cnt,$temp\n"
            "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
            " BRge loop\t\t! Clearing loop\n"
            " STX G0,[$base+$temp]\t! delay slot" %}

  ins_encode %{
    // Compiler ensures base is doubleword aligned and cnt is count of doublewords
    Register nof_bytes_arg    = $cnt$$Register;
    Register nof_bytes_tmp    = $temp$$Register;
    Register base_pointer_arg = $base$$Register;

    Label loop;
    __ mov(nof_bytes_arg, nof_bytes_tmp);

    // Loop and clear, walking backwards through the array.
    // nof_bytes_tmp (if >0) is always the number of bytes to zero
    __ bind(loop);
    __ deccc(nof_bytes_tmp, 8);
    __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
    __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
    // %%%% this mini-loop must not cross a cache boundary!
  %}
  ins_pipe(long_memory_op);
%}

// Block-zeroing (BIS) variant of ClearArray; no temp register needed.
instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)));
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to    = $base$$Register;
    Register count = $cnt$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing (temp is not used).
    __ bis_zeroing(to, count, G0, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

// BIS variant that needs a temp because BlockZeroingLowLimit does not fit
// in a simm13 (see predicate).
instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
  predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
  ins_cost(300);
  format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}

  ins_encode %{

    assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
    Register to    = $base$$Register;
    Register count = $cnt$$Register;
    Register temp  = $tmp$$Register;

    Label Ldone;
    __ nop(); // Separate short branches
    // Use BIS for zeroing
    __ bis_zeroing(to, count, temp, Ldone);
    __ bind(Ldone);

  %}
  ins_pipe(long_memory_op);
%}

// String.compareTo intrinsic; operands are pinned to specific out/global
// registers expected by enc_String_Compare.
instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
                        o7RegI tmp, flagsReg ccr) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
  ins_cost(300);
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
  ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) );
  ins_pipe(long_memory_op);
%}

// String.equals intrinsic.
instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
                       o7RegI tmp, flagsReg ccr) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
  ins_cost(300);
  format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %}
  ins_encode( enc_String_Equals(str1, str2, cnt, result) );
  ins_pipe(long_memory_op);
%}

// Arrays.equals intrinsic.
instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
                      o7RegI tmp2, flagsReg ccr) %{
  match(Set result (AryEq ary1 ary2));
  effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
  ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result));
  ins_pipe(long_memory_op);
%}


//---------- Zeros Count Instructions ------------------------------------------

// clz(x) via smear-right-then-popcount: after the OR cascade every bit at or
// below the highest set bit is 1, so 32 - popc(x) is the leading-zero count.
instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosI src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // return (WORDBITS - popc(x));
  format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t"
            "SRL $src,0,$dst\t! 32-bit zero extend\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRL $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 32,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srl(Rsrc, 1, Rtmp);
    __ srl(Rsrc, 0, Rdst);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 2, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 4, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 8, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ srl(Rdst, 16, Rtmp);
    __ or3(Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerInt, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

// Same smear-and-popcount scheme as above, extended to 64 bits.
instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountLeadingZerosL src));
  effect(TEMP dst, TEMP tmp, KILL cr);

  // x |= (x >> 1);
  // x |= (x >> 2);
  // x |= (x >> 4);
  // x |= (x >> 8);
  // x |= (x >> 16);
  // x |= (x >> 32);
  // return (WORDBITS - popc(x));
  format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t"
            "OR $src,$tmp,$dst\n\t"
            "SRLX $dst,2,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,4,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,8,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,16,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "SRLX $dst,32,$tmp\n\t"
            "OR $dst,$tmp,$dst\n\t"
            "POPC $dst,$dst\n\t"
            "MOV 64,$tmp\n\t"
            "SUB $tmp,$dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    __ srlx(Rsrc, 1, Rtmp);
    __ or3( Rsrc, Rtmp, Rdst);
    __ srlx(Rdst, 2, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 4, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 8, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 16, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ srlx(Rdst, 32, Rtmp);
    __ or3( Rdst, Rtmp, Rdst);
    __ popc(Rdst, Rdst);
    __ mov(BitsPerLong, Rtmp);
    __ sub(Rtmp, Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

// ctz(x) = popc(~x & (x - 1)); the SRL by G0 (shift count 0) truncates
// to 32 bits before the 64-bit POPC.
instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosI src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "SRL $dst,R_G0,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ srl(Rdst, G0, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}

// 64-bit ctz, same identity without the 32-bit truncation step.
instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
  predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
  match(Set dst (CountTrailingZerosL src));
  effect(TEMP dst, KILL cr);

  // return popc(~x & (x - 1));
  format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t"
            "ANDN $dst,$src,$dst\n\t"
            "POPC $dst,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    __ sub(Rsrc, 1, Rdst);
    __ andn(Rdst, Rsrc, Rdst);
    __ popc(Rdst, Rdst);
  %}
  ins_pipe(ialu_reg);
%}


//---------- Population Count Instructions -------------------------------------

// Integer.bitCount: clear the upper 32 bits first since POPC counts all 64.
instruct popCountI(iRegIsafe dst, iRegI src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));

  format %{ "SRL $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t"
            "POPC $dst, $dst" %}
  ins_encode %{
    __ srl($src$$Register, G0, $dst$$Register);
    __ popc($dst$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegIsafe dst, iRegL src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));

  format %{ "POPC $src, $dst" %}
  ins_encode %{
    __ popc($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}


// ============================================================================
//------------Bytes reverse--------------------------------------------------
// The stack-slot variants below force the value through memory and reload
// it with a little-endian ASI load, which performs the byte swap.

instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesI src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
  match(Set dst (ReverseBytesL src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ set($src$$disp + STACK_BIAS, O7);
    __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesUS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  // NOTE(review): the trailing "\n\t" in this format string looks like a
  // copy-paste leftover -- confirm against the other single-line formats.
  format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
  match(Set dst (ReverseBytesS src));

  // Op cost is artificially doubled to make sure that load or store
  // instructions are preferred over this one which requires a spill
  // onto a stack slot.
  ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
  format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}

  ins_encode %{
    // the value was spilled as an int so bias the load
    __ set($src$$disp + STACK_BIAS + 2, O7);
    __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe( iload_mem );
%}

// Load Integer reversed byte order
instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesI (LoadI src)));

  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(4);
  format %{ "LDUWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - aligned and reversed
instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesL (LoadL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load
unsigned short / char reversed byte order 10579 instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{ 10580 match(Set dst (ReverseBytesUS (LoadUS src))); 10581 10582 ins_cost(MEMORY_REF_COST); 10583 size(4); 10584 format %{ "LDUHA $src, $dst\t!asi=primary_little" %} 10585 10586 ins_encode %{ 10587 __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10588 %} 10589 ins_pipe(iload_mem); 10590 %} 10591 10592 // Load short reversed byte order 10593 instruct loadS_reversed(iRegI dst, indIndexMemory src) %{ 10594 match(Set dst (ReverseBytesS (LoadS src))); 10595 10596 ins_cost(MEMORY_REF_COST); 10597 size(4); 10598 format %{ "LDSHA $src, $dst\t!asi=primary_little" %} 10599 10600 ins_encode %{ 10601 __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10602 %} 10603 ins_pipe(iload_mem); 10604 %} 10605 10606 // Store Integer reversed byte order 10607 instruct storeI_reversed(indIndexMemory dst, iRegI src) %{ 10608 match(Set dst (StoreI dst (ReverseBytesI src))); 10609 10610 ins_cost(MEMORY_REF_COST); 10611 size(4); 10612 format %{ "STWA $src, $dst\t!asi=primary_little" %} 10613 10614 ins_encode %{ 10615 __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE); 10616 %} 10617 ins_pipe(istore_mem_reg); 10618 %} 10619 10620 // Store Long reversed byte order 10621 instruct storeL_reversed(indIndexMemory dst, iRegL src) %{ 10622 match(Set dst (StoreL dst (ReverseBytesL src))); 10623 10624 ins_cost(MEMORY_REF_COST); 10625 size(4); 10626 format %{ "STXA $src, $dst\t!asi=primary_little" %} 10627 10628 ins_encode %{ 10629 __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE); 10630 %} 10631 ins_pipe(istore_mem_reg); 10632 %} 10633 10634 // Store unsighed short/char reversed byte order 10635 instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{ 10636 match(Set dst (StoreC dst 
(ReverseBytesUS src))); 10637 10638 ins_cost(MEMORY_REF_COST); 10639 size(4); 10640 format %{ "STHA $src, $dst\t!asi=primary_little" %} 10641 10642 ins_encode %{ 10643 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE); 10644 %} 10645 ins_pipe(istore_mem_reg); 10646 %} 10647 10648 // Store short reversed byte order 10649 instruct storeS_reversed(indIndexMemory dst, iRegI src) %{ 10650 match(Set dst (StoreC dst (ReverseBytesS src))); 10651 10652 ins_cost(MEMORY_REF_COST); 10653 size(4); 10654 format %{ "STHA $src, $dst\t!asi=primary_little" %} 10655 10656 ins_encode %{ 10657 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE); 10658 %} 10659 ins_pipe(istore_mem_reg); 10660 %} 10661 10662 // ====================VECTOR INSTRUCTIONS===================================== 10663 10664 // Load Aligned Packed values into a Double Register 10665 instruct loadV8(regD dst, memory mem) %{ 10666 predicate(n->as_LoadVector()->memory_size() == 8); 10667 match(Set dst (LoadVector mem)); 10668 ins_cost(MEMORY_REF_COST); 10669 size(4); 10670 format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %} 10671 ins_encode %{ 10672 __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg)); 10673 %} 10674 ins_pipe(floadD_mem); 10675 %} 10676 10677 // Store Vector in Double register to memory 10678 instruct storeV8(memory mem, regD src) %{ 10679 predicate(n->as_StoreVector()->memory_size() == 8); 10680 match(Set mem (StoreVector mem src)); 10681 ins_cost(MEMORY_REF_COST); 10682 size(4); 10683 format %{ "STDF $src,$mem\t! 
store vector (8 bytes)" %} 10684 ins_encode %{ 10685 __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address); 10686 %} 10687 ins_pipe(fstoreD_mem_reg); 10688 %} 10689 10690 // Store Zero into vector in memory 10691 instruct storeV8B_zero(memory mem, immI0 zero) %{ 10692 predicate(n->as_StoreVector()->memory_size() == 8); 10693 match(Set mem (StoreVector mem (ReplicateB zero))); 10694 ins_cost(MEMORY_REF_COST); 10695 size(4); 10696 format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %} 10697 ins_encode %{ 10698 __ stx(G0, $mem$$Address); 10699 %} 10700 ins_pipe(fstoreD_mem_zero); 10701 %} 10702 10703 instruct storeV4S_zero(memory mem, immI0 zero) %{ 10704 predicate(n->as_StoreVector()->memory_size() == 8); 10705 match(Set mem (StoreVector mem (ReplicateS zero))); 10706 ins_cost(MEMORY_REF_COST); 10707 size(4); 10708 format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %} 10709 ins_encode %{ 10710 __ stx(G0, $mem$$Address); 10711 %} 10712 ins_pipe(fstoreD_mem_zero); 10713 %} 10714 10715 instruct storeV2I_zero(memory mem, immI0 zero) %{ 10716 predicate(n->as_StoreVector()->memory_size() == 8); 10717 match(Set mem (StoreVector mem (ReplicateI zero))); 10718 ins_cost(MEMORY_REF_COST); 10719 size(4); 10720 format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %} 10721 ins_encode %{ 10722 __ stx(G0, $mem$$Address); 10723 %} 10724 ins_pipe(fstoreD_mem_zero); 10725 %} 10726 10727 instruct storeV2F_zero(memory mem, immF0 zero) %{ 10728 predicate(n->as_StoreVector()->memory_size() == 8); 10729 match(Set mem (StoreVector mem (ReplicateF zero))); 10730 ins_cost(MEMORY_REF_COST); 10731 size(4); 10732 format %{ "STX $zero,$mem\t! 
store zero vector (2 floats)" %} 10733 ins_encode %{ 10734 __ stx(G0, $mem$$Address); 10735 %} 10736 ins_pipe(fstoreD_mem_zero); 10737 %} 10738 10739 // Replicate scalar to packed byte values into Double register 10740 instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10741 predicate(n->as_Vector()->length() == 8 && UseVIS >= 3); 10742 match(Set dst (ReplicateB src)); 10743 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10744 format %{ "SLLX $src,56,$tmp\n\t" 10745 "SRLX $tmp, 8,$tmp2\n\t" 10746 "OR $tmp,$tmp2,$tmp\n\t" 10747 "SRLX $tmp,16,$tmp2\n\t" 10748 "OR $tmp,$tmp2,$tmp\n\t" 10749 "SRLX $tmp,32,$tmp2\n\t" 10750 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t" 10751 "MOVXTOD $tmp,$dst\t! MoveL2D" %} 10752 ins_encode %{ 10753 Register Rsrc = $src$$Register; 10754 Register Rtmp = $tmp$$Register; 10755 Register Rtmp2 = $tmp2$$Register; 10756 __ sllx(Rsrc, 56, Rtmp); 10757 __ srlx(Rtmp, 8, Rtmp2); 10758 __ or3 (Rtmp, Rtmp2, Rtmp); 10759 __ srlx(Rtmp, 16, Rtmp2); 10760 __ or3 (Rtmp, Rtmp2, Rtmp); 10761 __ srlx(Rtmp, 32, Rtmp2); 10762 __ or3 (Rtmp, Rtmp2, Rtmp); 10763 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10764 %} 10765 ins_pipe(ialu_reg); 10766 %} 10767 10768 // Replicate scalar to packed byte values into Double stack 10769 instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10770 predicate(n->as_Vector()->length() == 8 && UseVIS < 3); 10771 match(Set dst (ReplicateB src)); 10772 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10773 format %{ "SLLX $src,56,$tmp\n\t" 10774 "SRLX $tmp, 8,$tmp2\n\t" 10775 "OR $tmp,$tmp2,$tmp\n\t" 10776 "SRLX $tmp,16,$tmp2\n\t" 10777 "OR $tmp,$tmp2,$tmp\n\t" 10778 "SRLX $tmp,32,$tmp2\n\t" 10779 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t" 10780 "STX $tmp,$dst\t! 
regL to stkD" %} 10781 ins_encode %{ 10782 Register Rsrc = $src$$Register; 10783 Register Rtmp = $tmp$$Register; 10784 Register Rtmp2 = $tmp2$$Register; 10785 __ sllx(Rsrc, 56, Rtmp); 10786 __ srlx(Rtmp, 8, Rtmp2); 10787 __ or3 (Rtmp, Rtmp2, Rtmp); 10788 __ srlx(Rtmp, 16, Rtmp2); 10789 __ or3 (Rtmp, Rtmp2, Rtmp); 10790 __ srlx(Rtmp, 32, Rtmp2); 10791 __ or3 (Rtmp, Rtmp2, Rtmp); 10792 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10793 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10794 %} 10795 ins_pipe(ialu_reg); 10796 %} 10797 10798 // Replicate scalar constant to packed byte values in Double register 10799 instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{ 10800 predicate(n->as_Vector()->length() == 8); 10801 match(Set dst (ReplicateB con)); 10802 effect(KILL tmp); 10803 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %} 10804 ins_encode %{ 10805 // XXX This is a quick fix for 6833573. 10806 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister); 10807 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register); 10808 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10809 %} 10810 ins_pipe(loadConFD); 10811 %} 10812 10813 // Replicate scalar to packed char/short values into Double register 10814 instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10815 predicate(n->as_Vector()->length() == 4 && UseVIS >= 3); 10816 match(Set dst (ReplicateS src)); 10817 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10818 format %{ "SLLX $src,48,$tmp\n\t" 10819 "SRLX $tmp,16,$tmp2\n\t" 10820 "OR $tmp,$tmp2,$tmp\n\t" 10821 "SRLX $tmp,32,$tmp2\n\t" 10822 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t" 10823 "MOVXTOD $tmp,$dst\t! 
MoveL2D" %} 10824 ins_encode %{ 10825 Register Rsrc = $src$$Register; 10826 Register Rtmp = $tmp$$Register; 10827 Register Rtmp2 = $tmp2$$Register; 10828 __ sllx(Rsrc, 48, Rtmp); 10829 __ srlx(Rtmp, 16, Rtmp2); 10830 __ or3 (Rtmp, Rtmp2, Rtmp); 10831 __ srlx(Rtmp, 32, Rtmp2); 10832 __ or3 (Rtmp, Rtmp2, Rtmp); 10833 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10834 %} 10835 ins_pipe(ialu_reg); 10836 %} 10837 10838 // Replicate scalar to packed char/short values into Double stack 10839 instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10840 predicate(n->as_Vector()->length() == 4 && UseVIS < 3); 10841 match(Set dst (ReplicateS src)); 10842 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10843 format %{ "SLLX $src,48,$tmp\n\t" 10844 "SRLX $tmp,16,$tmp2\n\t" 10845 "OR $tmp,$tmp2,$tmp\n\t" 10846 "SRLX $tmp,32,$tmp2\n\t" 10847 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t" 10848 "STX $tmp,$dst\t! regL to stkD" %} 10849 ins_encode %{ 10850 Register Rsrc = $src$$Register; 10851 Register Rtmp = $tmp$$Register; 10852 Register Rtmp2 = $tmp2$$Register; 10853 __ sllx(Rsrc, 48, Rtmp); 10854 __ srlx(Rtmp, 16, Rtmp2); 10855 __ or3 (Rtmp, Rtmp2, Rtmp); 10856 __ srlx(Rtmp, 32, Rtmp2); 10857 __ or3 (Rtmp, Rtmp2, Rtmp); 10858 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10859 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10860 %} 10861 ins_pipe(ialu_reg); 10862 %} 10863 10864 // Replicate scalar constant to packed char/short values in Double register 10865 instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{ 10866 predicate(n->as_Vector()->length() == 4); 10867 match(Set dst (ReplicateS con)); 10868 effect(KILL tmp); 10869 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %} 10870 ins_encode %{ 10871 // XXX This is a quick fix for 6833573. 
10872 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister); 10873 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register); 10874 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10875 %} 10876 ins_pipe(loadConFD); 10877 %} 10878 10879 // Replicate scalar to packed int values into Double register 10880 instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10881 predicate(n->as_Vector()->length() == 2 && UseVIS >= 3); 10882 match(Set dst (ReplicateI src)); 10883 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10884 format %{ "SLLX $src,32,$tmp\n\t" 10885 "SRLX $tmp,32,$tmp2\n\t" 10886 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t" 10887 "MOVXTOD $tmp,$dst\t! MoveL2D" %} 10888 ins_encode %{ 10889 Register Rsrc = $src$$Register; 10890 Register Rtmp = $tmp$$Register; 10891 Register Rtmp2 = $tmp2$$Register; 10892 __ sllx(Rsrc, 32, Rtmp); 10893 __ srlx(Rtmp, 32, Rtmp2); 10894 __ or3 (Rtmp, Rtmp2, Rtmp); 10895 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg)); 10896 %} 10897 ins_pipe(ialu_reg); 10898 %} 10899 10900 // Replicate scalar to packed int values into Double stack 10901 instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{ 10902 predicate(n->as_Vector()->length() == 2 && UseVIS < 3); 10903 match(Set dst (ReplicateI src)); 10904 effect(DEF dst, USE src, TEMP tmp, KILL tmp2); 10905 format %{ "SLLX $src,32,$tmp\n\t" 10906 "SRLX $tmp,32,$tmp2\n\t" 10907 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t" 10908 "STX $tmp,$dst\t! 
regL to stkD" %} 10909 ins_encode %{ 10910 Register Rsrc = $src$$Register; 10911 Register Rtmp = $tmp$$Register; 10912 Register Rtmp2 = $tmp2$$Register; 10913 __ sllx(Rsrc, 32, Rtmp); 10914 __ srlx(Rtmp, 32, Rtmp2); 10915 __ or3 (Rtmp, Rtmp2, Rtmp); 10916 __ set ($dst$$disp + STACK_BIAS, Rtmp2); 10917 __ stx (Rtmp, Rtmp2, $dst$$base$$Register); 10918 %} 10919 ins_pipe(ialu_reg); 10920 %} 10921 10922 // Replicate scalar zero constant to packed int values in Double register 10923 instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{ 10924 predicate(n->as_Vector()->length() == 2); 10925 match(Set dst (ReplicateI con)); 10926 effect(KILL tmp); 10927 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %} 10928 ins_encode %{ 10929 // XXX This is a quick fix for 6833573. 10930 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister); 10931 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register); 10932 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10933 %} 10934 ins_pipe(loadConFD); 10935 %} 10936 10937 // Replicate scalar to packed float values into Double stack 10938 instruct Repl2F_stk(stackSlotD dst, regF src) %{ 10939 predicate(n->as_Vector()->length() == 2); 10940 match(Set dst (ReplicateF src)); 10941 ins_cost(MEMORY_REF_COST*2); 10942 format %{ "STF $src,$dst.hi\t! 
packed2F\n\t" 10943 "STF $src,$dst.lo" %} 10944 opcode(Assembler::stf_op3); 10945 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src)); 10946 ins_pipe(fstoreF_stk_reg); 10947 %} 10948 10949 // Replicate scalar zero constant to packed float values in Double register 10950 instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{ 10951 predicate(n->as_Vector()->length() == 2); 10952 match(Set dst (ReplicateF con)); 10953 effect(KILL tmp); 10954 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %} 10955 ins_encode %{ 10956 // XXX This is a quick fix for 6833573. 10957 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister); 10958 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register); 10959 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 10960 %} 10961 ins_pipe(loadConFD); 10962 %} 10963 10964 //----------PEEPHOLE RULES----------------------------------------------------- 10965 // These must follow all instruction definitions as they use the names 10966 // defined in the instructions definitions. 10967 // 10968 // peepmatch ( root_instr_name [preceding_instruction]* ); 10969 // 10970 // peepconstraint %{ 10971 // (instruction_number.operand_name relational_op instruction_number.operand_name 10972 // [, ...] 
); 10973 // // instruction numbers are zero-based using left to right order in peepmatch 10974 // 10975 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) ); 10976 // // provide an instruction_number.operand_name for each operand that appears 10977 // // in the replacement instruction's match rule 10978 // 10979 // ---------VM FLAGS--------------------------------------------------------- 10980 // 10981 // All peephole optimizations can be turned off using -XX:-OptoPeephole 10982 // 10983 // Each peephole rule is given an identifying number starting with zero and 10984 // increasing by one in the order seen by the parser. An individual peephole 10985 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# 10986 // on the command-line. 10987 // 10988 // ---------CURRENT LIMITATIONS---------------------------------------------- 10989 // 10990 // Only match adjacent instructions in same basic block 10991 // Only equality constraints 10992 // Only constraints between operands, not (0.dest_reg == EAX_enc) 10993 // Only one replacement instruction 10994 // 10995 // ---------EXAMPLE---------------------------------------------------------- 10996 // 10997 // // pertinent parts of existing instructions in architecture description 10998 // instruct movI(eRegI dst, eRegI src) %{ 10999 // match(Set dst (CopyI src)); 11000 // %} 11001 // 11002 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{ 11003 // match(Set dst (AddI dst src)); 11004 // effect(KILL cr); 11005 // %} 11006 // 11007 // // Change (inc mov) to lea 11008 // peephole %{ 11009 // // increment preceeded by register-register move 11010 // peepmatch ( incI_eReg movI ); 11011 // // require that the destination register of the increment 11012 // // match the destination register of the move 11013 // peepconstraint ( 0.dst == 1.dst ); 11014 // // construct a replacement instruction that sets 11015 // // the destination to ( move's source register + one ) 11016 // peepreplace ( 
incI_eReg_immI1( 0.dst 1.src 0.src ) ); 11017 // %} 11018 // 11019 11020 // // Change load of spilled value to only a spill 11021 // instruct storeI(memory mem, eRegI src) %{ 11022 // match(Set mem (StoreI mem src)); 11023 // %} 11024 // 11025 // instruct loadI(eRegI dst, memory mem) %{ 11026 // match(Set dst (LoadI mem)); 11027 // %} 11028 // 11029 // peephole %{ 11030 // peepmatch ( loadI storeI ); 11031 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem ); 11032 // peepreplace ( storeI( 1.mem 1.mem 1.src ) ); 11033 // %} 11034 11035 //----------SMARTSPILL RULES--------------------------------------------------- 11036 // These must follow all instruction definitions as they use the names 11037 // defined in the instructions definitions. 11038 // 11039 // SPARC will probably not have any of these rules due to RISC instruction set. 11040 11041 //----------PIPELINE----------------------------------------------------------- 11042 // Rules which define the behavior of the target architectures pipeline.