//
// Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// SPARC Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding, vm name );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
41 // 42 // SOC = Save-On-Call: The register allocator assumes that these registers 43 // can be used without saving upon entry to the method, 44 // but that they must be saved at call sites. 45 // 46 // SOE = Save-On-Entry: The register allocator assumes that these registers 47 // must be saved before using them upon entry to the 48 // method, but they do not need to be saved at call 49 // sites. 50 // 51 // AS = Always-Save: The register allocator assumes that these registers 52 // must be saved before using them upon entry to the 53 // method, & that they must be saved at call sites. 54 // 55 // Ideal Register Type is used to determine how to save & restore a 56 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get 57 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. 58 // 59 // The encoding number is the actual bit-pattern placed into the opcodes. 60 61 62 // ---------------------------- 63 // Integer/Long Registers 64 // ---------------------------- 65 66 // Need to expose the hi/lo aspect of 64-bit registers 67 // This register set is used for both the 64-bit build and 68 // the 32-bit build with 1-register longs. 
69 70 // Global Registers 0-7 71 reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next()); 72 reg_def R_G0 ( NS, NS, Op_RegI, 0, G0->as_VMReg()); 73 reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next()); 74 reg_def R_G1 (SOC, SOC, Op_RegI, 1, G1->as_VMReg()); 75 reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next()); 76 reg_def R_G2 ( NS, NS, Op_RegI, 2, G2->as_VMReg()); 77 reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next()); 78 reg_def R_G3 (SOC, SOC, Op_RegI, 3, G3->as_VMReg()); 79 reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next()); 80 reg_def R_G4 (SOC, SOC, Op_RegI, 4, G4->as_VMReg()); 81 reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next()); 82 reg_def R_G5 (SOC, SOC, Op_RegI, 5, G5->as_VMReg()); 83 reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next()); 84 reg_def R_G6 ( NS, NS, Op_RegI, 6, G6->as_VMReg()); 85 reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next()); 86 reg_def R_G7 ( NS, NS, Op_RegI, 7, G7->as_VMReg()); 87 88 // Output Registers 0-7 89 reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next()); 90 reg_def R_O0 (SOC, SOC, Op_RegI, 8, O0->as_VMReg()); 91 reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next()); 92 reg_def R_O1 (SOC, SOC, Op_RegI, 9, O1->as_VMReg()); 93 reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next()); 94 reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg()); 95 reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next()); 96 reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg()); 97 reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next()); 98 reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg()); 99 reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next()); 100 reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg()); 101 reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next()); 102 reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg()); 103 reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next()); 104 reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg()); 
105 106 // Local Registers 0-7 107 reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next()); 108 reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg()); 109 reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next()); 110 reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg()); 111 reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next()); 112 reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg()); 113 reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next()); 114 reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg()); 115 reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next()); 116 reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg()); 117 reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next()); 118 reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg()); 119 reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next()); 120 reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg()); 121 reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next()); 122 reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg()); 123 124 // Input Registers 0-7 125 reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next()); 126 reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg()); 127 reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next()); 128 reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg()); 129 reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next()); 130 reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg()); 131 reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next()); 132 reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg()); 133 reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next()); 134 reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg()); 135 reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next()); 136 reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg()); 137 reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next()); 138 reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg()); 139 reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next()); 140 reg_def R_I7 ( NS, NS, Op_RegI, 31, 
I7->as_VMReg()); 141 142 // ---------------------------- 143 // Float/Double Registers 144 // ---------------------------- 145 146 // Float Registers 147 reg_def R_F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()); 148 reg_def R_F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()); 149 reg_def R_F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()); 150 reg_def R_F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()); 151 reg_def R_F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()); 152 reg_def R_F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()); 153 reg_def R_F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()); 154 reg_def R_F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()); 155 reg_def R_F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()); 156 reg_def R_F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()); 157 reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg()); 158 reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg()); 159 reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg()); 160 reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg()); 161 reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg()); 162 reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg()); 163 reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg()); 164 reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg()); 165 reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg()); 166 reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg()); 167 reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg()); 168 reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg()); 169 reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg()); 170 reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg()); 171 reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg()); 172 reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg()); 173 reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg()); 174 reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg()); 175 reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg()); 176 reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg()); 177 reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg()); 178 
reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg()); 179 180 // Double Registers 181 // The rules of ADL require that double registers be defined in pairs. 182 // Each pair must be two 32-bit values, but not necessarily a pair of 183 // single float registers. In each pair, ADLC-assigned register numbers 184 // must be adjacent, with the lower number even. Finally, when the 185 // CPU stores such a register pair to memory, the word associated with 186 // the lower ADLC-assigned number must be stored to the lower address. 187 188 // These definitions specify the actual bit encodings of the sparc 189 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp 190 // wants 0-63, so we have to convert every time we want to use fp regs 191 // with the macroassembler, using reg_to_DoubleFloatRegister_object(). 192 // 255 is a flag meaning "don't go here". 193 // I believe we can't handle callee-save doubles D32 and up until 194 // the place in the sparc stack crawler that asserts on the 255 is 195 // fixed up. 
196 reg_def R_D32 (SOC, SOC, Op_RegD, 1, F32->as_VMReg()); 197 reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next()); 198 reg_def R_D34 (SOC, SOC, Op_RegD, 3, F34->as_VMReg()); 199 reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next()); 200 reg_def R_D36 (SOC, SOC, Op_RegD, 5, F36->as_VMReg()); 201 reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next()); 202 reg_def R_D38 (SOC, SOC, Op_RegD, 7, F38->as_VMReg()); 203 reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next()); 204 reg_def R_D40 (SOC, SOC, Op_RegD, 9, F40->as_VMReg()); 205 reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next()); 206 reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg()); 207 reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next()); 208 reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg()); 209 reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next()); 210 reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg()); 211 reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next()); 212 reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg()); 213 reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next()); 214 reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg()); 215 reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next()); 216 reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg()); 217 reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next()); 218 reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg()); 219 reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next()); 220 reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg()); 221 reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next()); 222 reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg()); 223 reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next()); 224 reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg()); 225 reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next()); 226 reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg()); 227 reg_def R_D62x(SOC, 
SOC, Op_RegD,255, F62->as_VMReg()->next()); 228 229 230 // ---------------------------- 231 // Special Registers 232 // Condition Codes Flag Registers 233 // I tried to break out ICC and XCC but it's not very pretty. 234 // Every Sparc instruction which defs/kills one also kills the other. 235 // Hence every compare instruction which defs one kind of flags ends 236 // up needing a kill of the other. 237 reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 238 239 reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad()); 240 reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad()); 241 reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad()); 242 reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad()); 243 244 // ---------------------------- 245 // Specify the enum values for the registers. These enums are only used by the 246 // OptoReg "class". We can convert these enum values at will to VMReg when needed 247 // for visibility to the rest of the vm. The order of this enum influences the 248 // register allocator so having the freedom to set this order and not be stuck 249 // with the order that is natural for the rest of the vm is worth it. 250 alloc_class chunk0( 251 R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H, 252 R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H, 253 R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H, 254 R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H); 255 256 // Note that a register is not allocatable unless it is also mentioned 257 // in a widely-used reg_class below. Thus, R_G7 and R_G0 are outside i_reg. 258 259 alloc_class chunk1( 260 // The first registers listed here are those most likely to be used 261 // as temporaries. 
We move F0..F7 away from the front of the list, 262 // to reduce the likelihood of interferences with parameters and 263 // return values. Likewise, we avoid using F0/F1 for parameters, 264 // since they are used for return values. 265 // This FPU fine-tuning is worth about 1% on the SPEC geomean. 266 R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 267 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23, 268 R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31, 269 R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values 270 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x, 271 R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 272 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x, 273 R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x); 274 275 alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3); 276 277 //----------Architecture Description Register Classes-------------------------- 278 // Several register classes are automatically defined based upon information in 279 // this architecture description. 280 // 1) reg_class inline_cache_reg ( as defined in frame section ) 281 // 2) reg_class interpreter_method_oop_reg ( as defined in frame section ) 282 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ ) 283 // 284 285 // G0 is not included in integer class since it has special meaning. 286 reg_class g0_reg(R_G0); 287 288 // ---------------------------- 289 // Integer Register Classes 290 // ---------------------------- 291 // Exclusions from i_reg: 292 // R_G0: hardwired zero 293 // R_G2: reserved by HotSpot to the TLS register (invariant within Java) 294 // R_G6: reserved by Solaris ABI to tools 295 // R_G7: reserved by Solaris ABI to libthread 296 // R_O7: Used as a temp in many encodings 297 reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 298 299 // Class for all integer registers, except the G registers. 
This is used for 300 // encodings which use G registers as temps. The regular inputs to such 301 // instructions use a "notemp_" prefix, as a hack to ensure that the allocator 302 // will not put an input into a temp register. 303 reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 304 305 reg_class g1_regI(R_G1); 306 reg_class g3_regI(R_G3); 307 reg_class g4_regI(R_G4); 308 reg_class o0_regI(R_O0); 309 reg_class o7_regI(R_O7); 310 311 // ---------------------------- 312 // Pointer Register Classes 313 // ---------------------------- 314 #ifdef _LP64 315 // 64-bit build means 64-bit pointers means hi/lo pairs 316 reg_class ptr_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 317 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 318 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 319 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 320 // Lock encodings use G3 and G4 internally 321 reg_class lock_ptr_reg( R_G1H,R_G1, R_G5H,R_G5, 322 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, 323 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 324 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 ); 325 // Special class for storeP instructions, which can store SP or RPC to TLS. 326 // It is also used for memory addressing, allowing direct TLS addressing. 
327 reg_class sp_ptr_reg( R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5, 328 R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP, 329 R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7, 330 R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP ); 331 // R_L7 is the lowest-priority callee-save (i.e., NS) register 332 // We use it to save R_G2 across calls out of Java. 333 reg_class l7_regP(R_L7H,R_L7); 334 335 // Other special pointer regs 336 reg_class g1_regP(R_G1H,R_G1); 337 reg_class g2_regP(R_G2H,R_G2); 338 reg_class g3_regP(R_G3H,R_G3); 339 reg_class g4_regP(R_G4H,R_G4); 340 reg_class g5_regP(R_G5H,R_G5); 341 reg_class i0_regP(R_I0H,R_I0); 342 reg_class o0_regP(R_O0H,R_O0); 343 reg_class o1_regP(R_O1H,R_O1); 344 reg_class o2_regP(R_O2H,R_O2); 345 reg_class o7_regP(R_O7H,R_O7); 346 347 #else // _LP64 348 // 32-bit build means 32-bit pointers means 1 register. 349 reg_class ptr_reg( R_G1, R_G3,R_G4,R_G5, 350 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 351 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 352 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 353 // Lock encodings use G3 and G4 internally 354 reg_class lock_ptr_reg(R_G1, R_G5, 355 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5, 356 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 357 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5); 358 // Special class for storeP instructions, which can store SP or RPC to TLS. 359 // It is also used for memory addressing, allowing direct TLS addressing. 360 reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5, 361 R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP, 362 R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7, 363 R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP); 364 // R_L7 is the lowest-priority callee-save (i.e., NS) register 365 // We use it to save R_G2 across calls out of Java. 
366 reg_class l7_regP(R_L7); 367 368 // Other special pointer regs 369 reg_class g1_regP(R_G1); 370 reg_class g2_regP(R_G2); 371 reg_class g3_regP(R_G3); 372 reg_class g4_regP(R_G4); 373 reg_class g5_regP(R_G5); 374 reg_class i0_regP(R_I0); 375 reg_class o0_regP(R_O0); 376 reg_class o1_regP(R_O1); 377 reg_class o2_regP(R_O2); 378 reg_class o7_regP(R_O7); 379 #endif // _LP64 380 381 382 // ---------------------------- 383 // Long Register Classes 384 // ---------------------------- 385 // Longs in 1 register. Aligned adjacent hi/lo pairs. 386 // Note: O7 is never in this class; it is sometimes used as an encoding temp. 387 reg_class long_reg( R_G1H,R_G1, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5 388 ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5 389 #ifdef _LP64 390 // 64-bit, longs in 1 register: use all 64-bit integer registers 391 // 32-bit, longs in 1 register: cannot use I's and L's. Restrict to O's and G's. 392 ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7 393 ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 394 #endif // _LP64 395 ); 396 397 reg_class g1_regL(R_G1H,R_G1); 398 reg_class g3_regL(R_G3H,R_G3); 399 reg_class o2_regL(R_O2H,R_O2); 400 reg_class o7_regL(R_O7H,R_O7); 401 402 // ---------------------------- 403 // Special Class for Condition Code Flags Register 404 reg_class int_flags(CCR); 405 reg_class float_flags(FCC0,FCC1,FCC2,FCC3); 406 reg_class float_flag0(FCC0); 407 408 409 // ---------------------------- 410 // Float Point Register Classes 411 // ---------------------------- 412 // Skip F30/F31, they are reserved for mem-mem copies 413 reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 414 415 // Paired floating point registers--they show up in the same order as the floats, 416 // but they are used with the "Op_RegD" 
type, and always occur in even/odd pairs. 417 reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 418 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29, 419 /* Use extra V9 double registers; this AD file does not support V8 */ 420 R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x, 421 R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x 422 ); 423 424 // Paired floating point registers--they show up in the same order as the floats, 425 // but they are used with the "Op_RegD" type, and always occur in even/odd pairs. 426 // This class is usable for mis-aligned loads as happen in I2C adapters. 427 reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15, 428 R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29); 429 %} 430 431 //----------DEFINITION BLOCK--------------------------------------------------- 432 // Define name --> value mappings to inform the ADLC of an integer valued name 433 // Current support includes integer values in the range [0, 0x7FFFFFFF] 434 // Format: 435 // int_def <name> ( <int_value>, <expression>); 436 // Generated Code in ad_<arch>.hpp 437 // #define <name> (<expression>) 438 // // value == <int_value> 439 // Generated code in ad_<arch>.cpp adlc_verification() 440 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>"); 441 // 442 definitions %{ 443 // The default cost (of an ALU instruction). 444 int_def DEFAULT_COST ( 100, 100); 445 int_def HUGE_COST (1000000, 1000000); 446 447 // Memory refs are twice as expensive as run-of-the-mill. 448 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2); 449 450 // Branches are even more expensive. 
451 int_def BRANCH_COST ( 300, DEFAULT_COST * 3); 452 int_def CALL_COST ( 300, DEFAULT_COST * 3); 453 %} 454 455 456 //----------SOURCE BLOCK------------------------------------------------------- 457 // This is a block of C++ code which provides values, functions, and 458 // definitions necessary in the rest of the architecture description 459 source_hpp %{ 460 // Header information of the source block. 461 // Method declarations/definitions which are used outside 462 // the ad-scope can conveniently be defined here. 463 // 464 // To keep related declarations/definitions/uses close together, 465 // we switch between source %{ }% and source_hpp %{ }% freely as needed. 466 467 // Must be visible to the DFA in dfa_sparc.cpp 468 extern bool can_branch_register( Node *bol, Node *cmp ); 469 470 extern bool use_block_zeroing(Node* count); 471 472 // Macros to extract hi & lo halves from a long pair. 473 // G0 is not part of any long pair, so assert on that. 474 // Prevents accidentally using G1 instead of G0. 475 #define LONG_HI_REG(x) (x) 476 #define LONG_LO_REG(x) (x) 477 478 class CallStubImpl { 479 480 //-------------------------------------------------------------- 481 //---< Used for optimization in Compile::Shorten_branches >--- 482 //-------------------------------------------------------------- 483 484 public: 485 // Size of call trampoline stub. 
486 static uint size_call_trampoline() { 487 return 0; // no call trampolines on this platform 488 } 489 490 // number of relocations needed by a call trampoline stub 491 static uint reloc_call_trampoline() { 492 return 0; // no call trampolines on this platform 493 } 494 }; 495 496 class HandlerImpl { 497 498 public: 499 500 static int emit_exception_handler(CodeBuffer &cbuf); 501 static int emit_deopt_handler(CodeBuffer& cbuf); 502 503 static uint size_exception_handler() { 504 if (TraceJumps) { 505 return (400); // just a guess 506 } 507 return ( NativeJump::instruction_size ); // sethi;jmp;nop 508 } 509 510 static uint size_deopt_handler() { 511 if (TraceJumps) { 512 return (400); // just a guess 513 } 514 return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore 515 } 516 }; 517 518 %} 519 520 source %{ 521 #define __ _masm. 522 523 // tertiary op of a LoadP or StoreP encoding 524 #define REGP_OP true 525 526 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding); 527 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding); 528 static Register reg_to_register_object(int register_encoding); 529 530 // Used by the DFA in dfa_sparc.cpp. 531 // Check for being able to use a V9 branch-on-register. Requires a 532 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign- 533 // extended. Doesn't work following an integer ADD, for example, because of 534 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On 535 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and 536 // replace them with zero, which could become sign-extension in a different OS 537 // release. There's no obvious reason why an interrupt will ever fill these 538 // bits with non-zero junk (the registers are reloaded with standard LD 539 // instructions which either zero-fill or sign-fill). 
540 bool can_branch_register( Node *bol, Node *cmp ) { 541 if( !BranchOnRegister ) return false; 542 #ifdef _LP64 543 if( cmp->Opcode() == Op_CmpP ) 544 return true; // No problems with pointer compares 545 #endif 546 if( cmp->Opcode() == Op_CmpL ) 547 return true; // No problems with long compares 548 549 if( !SparcV9RegsHiBitsZero ) return false; 550 if( bol->as_Bool()->_test._test != BoolTest::ne && 551 bol->as_Bool()->_test._test != BoolTest::eq ) 552 return false; 553 554 // Check for comparing against a 'safe' value. Any operation which 555 // clears out the high word is safe. Thus, loads and certain shifts 556 // are safe, as are non-negative constants. Any operation which 557 // preserves zero bits in the high word is safe as long as each of its 558 // inputs are safe. Thus, phis and bitwise booleans are safe if their 559 // inputs are safe. At present, the only important case to recognize 560 // seems to be loads. Constants should fold away, and shifts & 561 // logicals can use the 'cc' forms. 562 Node *x = cmp->in(1); 563 if( x->is_Load() ) return true; 564 if( x->is_Phi() ) { 565 for( uint i = 1; i < x->req(); i++ ) 566 if( !x->in(i)->is_Load() ) 567 return false; 568 return true; 569 } 570 return false; 571 } 572 573 bool use_block_zeroing(Node* count) { 574 // Use BIS for zeroing if count is not constant 575 // or it is >= BlockZeroingLowLimit. 576 return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit); 577 } 578 579 // **************************************************************************** 580 581 // REQUIRED FUNCTIONALITY 582 583 // !!!!! Special hack to get all type of calls to specify the byte offset 584 // from the start of the call to the point where the return address 585 // will point. 586 // The "return address" is the address of the call instruction, plus 8. 
587 588 int MachCallStaticJavaNode::ret_addr_offset() { 589 int offset = NativeCall::instruction_size; // call; delay slot 590 if (_method_handle_invoke) 591 offset += 4; // restore SP 592 return offset; 593 } 594 595 int MachCallDynamicJavaNode::ret_addr_offset() { 596 int vtable_index = this->_vtable_index; 597 if (vtable_index < 0) { 598 // must be invalid_vtable_index, not nonvirtual_vtable_index 599 assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value"); 600 return (NativeMovConstReg::instruction_size + 601 NativeCall::instruction_size); // sethi; setlo; call; delay slot 602 } else { 603 assert(!UseInlineCaches, "expect vtable calls only if not using ICs"); 604 int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size(); 605 int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes(); 606 int klass_load_size; 607 if (UseCompressedClassPointers) { 608 assert(Universe::heap() != NULL, "java heap should be initialized"); 609 klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord; 610 } else { 611 klass_load_size = 1*BytesPerInstWord; 612 } 613 if (Assembler::is_simm13(v_off)) { 614 return klass_load_size + 615 (2*BytesPerInstWord + // ld_ptr, ld_ptr 616 NativeCall::instruction_size); // call; delay slot 617 } else { 618 return klass_load_size + 619 (4*BytesPerInstWord + // set_hi, set, ld_ptr, ld_ptr 620 NativeCall::instruction_size); // call; delay slot 621 } 622 } 623 } 624 625 int MachCallRuntimeNode::ret_addr_offset() { 626 #ifdef _LP64 627 if (MacroAssembler::is_far_target(entry_point())) { 628 return NativeFarCall::instruction_size; 629 } else { 630 return NativeCall::instruction_size; 631 } 632 #else 633 return NativeCall::instruction_size; // call; delay slot 634 #endif 635 } 636 637 // Indicate if the safepoint node needs the polling page as an input. 638 // Since Sparc does not have absolute addressing, it does. 
/* NOTE(review): this chunk is a flattened dump -- the original file's own line
   numbers (639, 640, ...) are embedded in the text below. All code tokens are
   preserved verbatim; only these block comments were added.
   Contents of this span:
   - SafePointNode::needs_polling_address_input(): returns true, i.e. on SPARC
     a safepoint poll node carries the polling-page address as an input.
   - emit_break() / MachBreakpointNode: emit a breakpoint trap ("TA" in the
     disassembly format) so the debugger can catch compiled code.
   - emit_jmpl(): register-indirect jump with a nop in the delay slot.
   - emit_jmpl_set_exception_pc(): same jump, but the delay slot computes the
     issuing PC into Oissuing_pc from O7 + frame::pc_return_offset.
   - emit_nop() / emit_illtrap(): single-instruction emitters.
   - get_offset_from_base(): VerifyOops helper -- recovers a memory operand's
     field offset via MachNode::get_base_and_disp() and asserts it matches the
     disp32 the matcher encoded. */
639 bool SafePointNode::needs_polling_address_input() { 640 return true; 641 } 642 643 // emit an interrupt that is caught by the debugger (for debugging compiler) 644 void emit_break(CodeBuffer &cbuf) { 645 MacroAssembler _masm(&cbuf); 646 __ breakpoint_trap(); 647 } 648 649 #ifndef PRODUCT 650 void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const { 651 st->print("TA"); 652 } 653 #endif 654 655 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 656 emit_break(cbuf); 657 } 658 659 uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const { 660 return MachNode::size(ra_); 661 } 662 663 // Traceable jump 664 void emit_jmpl(CodeBuffer &cbuf, int jump_target) { 665 MacroAssembler _masm(&cbuf); 666 Register rdest = reg_to_register_object(jump_target); 667 __ JMP(rdest, 0); 668 __ delayed()->nop(); 669 } 670 671 // Traceable jump and set exception pc 672 void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) { 673 MacroAssembler _masm(&cbuf); 674 Register rdest = reg_to_register_object(jump_target); 675 __ JMP(rdest, 0); 676 __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc ); 677 } 678 679 void emit_nop(CodeBuffer &cbuf) { 680 MacroAssembler _masm(&cbuf); 681 __ nop(); 682 } 683 684 void emit_illtrap(CodeBuffer &cbuf) { 685 MacroAssembler _masm(&cbuf); 686 __ illtrap(0); 687 } 688 689 690 intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) { 691 assert(n->rule() != loadUB_rule, ""); 692 693 intptr_t offset = 0; 694 const TypePtr *adr_type = TYPE_PTR_SENTINAL; // Check for base==RegI, disp==immP 695 const Node* addr = n->get_base_and_disp(offset, adr_type); 696 assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP"); 697 assert(addr != NULL && addr != (Node*)-1, "invalid addr"); 698 assert(addr->bottom_type()->isa_oopptr() == atype, ""); 699 atype = atype->add_offset(offset); 700 assert(disp32 == offset, "wrong disp32");
/* get_offset_from_base (cont.): returns the offset folded into the oop type.
   get_offset_from_base_2(): independent recomputation of the same offset by
   walking the address input (an AddP node's Address/Offset edges) -- used by
   emit_form3_mem_reg as a cross-check against get_offset_from_base.
   replicate_immI(): packs `count` copies of a `width`-byte immediate into a
   64-bit value and reinterprets the bits as jdouble (pointer-cast type pun --
   long-standing HotSpot idiom; relies on the VM's build flags, would be UB
   under strict aliasing in portable code). replicate_immF() (head): same for
   a 32-bit float replicated into both halves of a 64-bit vector constant. */
701 return atype->_offset; 702 } 703 704 705 intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) { 706 assert(n->rule() != loadUB_rule, ""); 707 708 intptr_t offset = 0; 709 Node* addr = n->in(2); 710 assert(addr->bottom_type()->isa_oopptr() == atype, ""); 711 if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) { 712 Node* a = addr->in(2/*AddPNode::Address*/); 713 Node* o = addr->in(3/*AddPNode::Offset*/); 714 offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot; 715 atype = a->bottom_type()->is_ptr()->add_offset(offset); 716 assert(atype->isa_oop_ptr(), "still an oop"); 717 } 718 offset = atype->is_ptr()->_offset; 719 if (offset != Type::OffsetBot) offset += disp32; 720 return offset; 721 } 722 723 static inline jdouble replicate_immI(int con, int count, int width) { 724 // Load a constant replicated "count" times with width "width" 725 assert(count*width == 8 && width <= 4, "sanity"); 726 int bit_width = width * 8; 727 jlong val = con; 728 val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits 729 for (int i = 0; i < count - 1; i++) { 730 val |= (val << bit_width); 731 } 732 jdouble dval = *((jdouble*) &val); // coerce to double type 733 return dval; 734 } 735 736 static inline jdouble replicate_immF(float con) { 737 // Replicate float con 2 times and pack into vector.
/* replicate_immF (cont.): duplicates the float's 32-bit pattern into both
   halves of a 64-bit value, reinterpreted as jdouble (same type-pun idiom as
   replicate_immI above).
   emit2_19 / emit2_22: assemble SPARC format-2 instructions; the fNN parameter
   names give the bit position each field is shifted to (f30 = op, f25/f22 etc.
   per the V9 format-2 layout). emit2_19 masks a 19-bit displacement; emit2_22
   drops the low 10 bits first (sethi-style) then masks to 22 bits.
   emit3: format-3 register-register form (rd at 25, op3 at 19, rs1 at 14,
   opf/asi field at 5, rs2 at 0).
   emit3_simm13: format-3 immediate form -- sets bit 13 (the i-bit) and masks
   the signed immediate to 13 bits. emit3_simm10 narrows to 10 bits first and
   then delegates.
   verify_oops_warning (head, ASSERT-only): diagnostic printed when
   emit_form3_mem_reg sees an instruction/ideal-opcode pairing it did not
   expect under +VerifyOops. */
738 int val = *((int*)&con); 739 jlong lval = val; 740 lval = (lval << 32) | (lval & 0xFFFFFFFFl); 741 jdouble dval = *((jdouble*) &lval); // coerce to double type 742 return dval; 743 } 744 745 // Standard Sparc opcode form2 field breakdown 746 static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) { 747 f0 &= (1<<19)-1; // Mask displacement to 19 bits 748 int op = (f30 << 30) | 749 (f29 << 29) | 750 (f25 << 25) | 751 (f22 << 22) | 752 (f20 << 20) | 753 (f19 << 19) | 754 (f0 << 0); 755 cbuf.insts()->emit_int32(op); 756 } 757 758 // Standard Sparc opcode form2 field breakdown 759 static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) { 760 f0 >>= 10; // Drop 10 bits 761 f0 &= (1<<22)-1; // Mask displacement to 22 bits 762 int op = (f30 << 30) | 763 (f25 << 25) | 764 (f22 << 22) | 765 (f0 << 0); 766 cbuf.insts()->emit_int32(op); 767 } 768 769 // Standard Sparc opcode form3 field breakdown 770 static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) { 771 int op = (f30 << 30) | 772 (f25 << 25) | 773 (f19 << 19) | 774 (f14 << 14) | 775 (f5 << 5) | 776 (f0 << 0); 777 cbuf.insts()->emit_int32(op); 778 } 779 780 // Standard Sparc opcode form3 field breakdown 781 static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) { 782 simm13 &= (1<<13)-1; // Mask to 13 bits 783 int op = (f30 << 30) | 784 (f25 << 25) | 785 (f19 << 19) | 786 (f14 << 14) | 787 (1 << 13) | // bit to indicate immediate-mode 788 (simm13<<0); 789 cbuf.insts()->emit_int32(op); 790 } 791 792 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) { 793 simm10 &= (1<<10)-1; // Mask to 10 bits 794 emit3_simm13(cbuf,f30,f25,f19,f14,simm10); 795 } 796 797 #ifdef ASSERT 798 // Helper function for VerifyOops in emit_form3_mem_reg 799 void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) { 800
/* verify_oops_warning (cont.): dumps the offending node and the mismatched
   ideal/memory opcodes. */
warning("VerifyOops encountered unexpected instruction:"); 801 n->dump(2); 802 warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]); 803 } 804 #endif 805 806
/* emit_form3_mem_reg: assembles a SPARC format-3 load/store.
   Inputs: `primary` is the op3 field, `tertiary` is either -1 or REGP_OP
   (meaning the stx/ldx/ldsw forms are really pointer ops); src1_enc is the
   base register encoding, disp32 the displacement, src2_enc the index
   register, dst_enc the data register. The large ASSERT block classifies the
   instruction and, under +VerifyOops, arranges verify_oop checks on the base
   and on loaded/stored oops (possibly detouring the load through O7 so the
   source oop survives for verification -- see tmp_enc). */
807 void emit_form3_mem_reg(CodeBuffer &cbuf, PhaseRegAlloc* ra, const MachNode* n, int primary, int tertiary, 808 int src1_enc, int disp32, int src2_enc, int dst_enc) { 809 810 #ifdef ASSERT 811 // The following code implements the +VerifyOops feature. 812 // It verifies oop values which are loaded into or stored out of 813 // the current method activation. +VerifyOops complements techniques 814 // like ScavengeALot, because it eagerly inspects oops in transit, 815 // as they enter or leave the stack, as opposed to ScavengeALot, 816 // which inspects oops "at rest", in the stack or heap, at safepoints. 817 // For this reason, +VerifyOops can sometimes detect bugs very close 818 // to their point of creation. It can also serve as a cross-check 819 // on the validity of oop maps, when used together with ScavengeALot. 820 821 // It would be good to verify oops at other points, especially 822 // when an oop is used as a base pointer for a load or store. 823 // This is presently difficult, because it is hard to know when 824 // a base address is biased or not. (If we had such information, 825 // it would be easy and useful to make a two-argument version of 826 // verify_oop which unbiases the base, and performs verification.)
/* Classify the op3 opcode into an ideal load or store opcode (st_op/ld_op),
   upgrading StoreI/LoadI to StoreP/LoadP when tertiary == REGP_OP, then
   record whether the value being moved is a verified oop. */
827 828 assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary"); 829 bool is_verified_oop_base = false; 830 bool is_verified_oop_load = false; 831 bool is_verified_oop_store = false; 832 int tmp_enc = -1; 833 if (VerifyOops && src1_enc != R_SP_enc) { 834 // classify the op, mainly for an assert check 835 int st_op = 0, ld_op = 0; 836 switch (primary) { 837 case Assembler::stb_op3: st_op = Op_StoreB; break; 838 case Assembler::sth_op3: st_op = Op_StoreC; break; 839 case Assembler::stx_op3: // may become StoreP or stay StoreI or StoreD0 840 case Assembler::stw_op3: st_op = Op_StoreI; break; 841 case Assembler::std_op3: st_op = Op_StoreL; break; 842 case Assembler::stf_op3: st_op = Op_StoreF; break; 843 case Assembler::stdf_op3: st_op = Op_StoreD; break; 844 845 case Assembler::ldsb_op3: ld_op = Op_LoadB; break; 846 case Assembler::ldub_op3: ld_op = Op_LoadUB; break; 847 case Assembler::lduh_op3: ld_op = Op_LoadUS; break; 848 case Assembler::ldsh_op3: ld_op = Op_LoadS; break; 849 case Assembler::ldx_op3: // may become LoadP or stay LoadI 850 case Assembler::ldsw_op3: // may become LoadP or stay LoadI 851 case Assembler::lduw_op3: ld_op = Op_LoadI; break; 852 case Assembler::ldd_op3: ld_op = Op_LoadL; break; 853 case Assembler::ldf_op3: ld_op = Op_LoadF; break; 854 case Assembler::lddf_op3: ld_op = Op_LoadD; break; 855 case Assembler::prefetch_op3: ld_op = Op_LoadI; break; 856 857 default: ShouldNotReachHere(); 858 } 859 if (tertiary == REGP_OP) { 860 if (st_op == Op_StoreI) st_op = Op_StoreP; 861 else if (ld_op == Op_LoadI) ld_op = Op_LoadP; 862 else ShouldNotReachHere(); 863 if (st_op) { 864 // a store 865 // inputs are (0:control, 1:memory, 2:address, 3:value) 866 Node* n2 = n->in(3); 867 if (n2 != NULL) { 868 const Type* t = n2->bottom_type(); 869 is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false; 870 } 871 } else { 872 // a load 873 const Type* t = n->bottom_type(); 874 is_verified_oop_load = t->isa_oop_ptr() ?
/* Whitelists of legitimate ideal-opcode / machine-opcode pairings; anything
   not listed triggers verify_oops_warning (diagnostic only, not fatal). */
(t->is_ptr()->_offset==0) : false; 875 } 876 } 877 878 if (ld_op) { 879 // a Load 880 // inputs are (0:control, 1:memory, 2:address) 881 if (!(n->ideal_Opcode()==ld_op) && // Following are special cases 882 !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) && 883 !(n->ideal_Opcode()==Op_LoadI && ld_op==Op_LoadF) && 884 !(n->ideal_Opcode()==Op_LoadF && ld_op==Op_LoadI) && 885 !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) && 886 !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) && 887 !(n->ideal_Opcode()==Op_LoadL && ld_op==Op_LoadI) && 888 !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) && 889 !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) && 890 !(n->ideal_Opcode()==Op_ConvI2F && ld_op==Op_LoadF) && 891 !(n->ideal_Opcode()==Op_ConvI2D && ld_op==Op_LoadF) && 892 !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) && 893 !(n->ideal_Opcode()==Op_LoadVector && ld_op==Op_LoadD) && 894 !(n->rule() == loadUB_rule)) { 895 verify_oops_warning(n, n->ideal_Opcode(), ld_op); 896 } 897 } else if (st_op) { 898 // a Store 899 // inputs are (0:control, 1:memory, 2:address, 3:value) 900 if (!(n->ideal_Opcode()==st_op) && // Following are special cases 901 !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) && 902 !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) && 903 !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) && 904 !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) && 905 !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) && 906 !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) { 907 verify_oops_warning(n, n->ideal_Opcode(), st_op); 908 } 909 } 910 911 if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) { 912 Node* addr = n->in(2); 913 if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) { 914 const TypeOopPtr* atype = addr->bottom_type()->isa_instptr(); // %%% oopptr?
/* Cross-check the two offset-recovery helpers (re-invoked on mismatch purely
   as a debugger convenience before the assert fires), then decide whether the
   base register holds a verifiable oop; if the load would clobber that base,
   detour the result through O7 and record the real destination in tmp_enc. */
915 if (atype != NULL) { 916 intptr_t offset = get_offset_from_base(n, atype, disp32); 917 intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32); 918 if (offset != offset_2) { 919 get_offset_from_base(n, atype, disp32); 920 get_offset_from_base_2(n, atype, disp32); 921 } 922 assert(offset == offset_2, "different offsets"); 923 if (offset == disp32) { 924 // we now know that src1 is a true oop pointer 925 is_verified_oop_base = true; 926 if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) { 927 if( primary == Assembler::ldd_op3 ) { 928 is_verified_oop_base = false; // Cannot 'ldd' into O7 929 } else { 930 tmp_enc = dst_enc; 931 dst_enc = R_O7_enc; // Load into O7; preserve source oop 932 assert(src1_enc != dst_enc, ""); 933 } 934 } 935 } 936 if (st_op && (( offset == oopDesc::klass_offset_in_bytes()) 937 || offset == oopDesc::mark_offset_in_bytes())) { 938 // loading the mark should not be allowed either, but 939 // we don't check this since it conflicts with InlineObjectHash 940 // usage of LoadINode to get the mark. We could keep the 941 // check if we create a new LoadMarkNode 942 // but do not verify the object before its header is initialized 943 ShouldNotReachHere(); 944 } 945 } 946 } 947 } 948 } 949 #endif 950
/* Actual instruction assembly: format-3 ld/st with either register index
   (bit 13 clear) or simm13 immediate (bit 13 set). SP/FP-relative accesses
   get STACK_BIAS added; overly large stack offsets bail out the compile
   (JDK-8029668) instead of producing a wrong encoding. */
951 uint instr; 952 instr = (Assembler::ldst_op << 30) 953 | (dst_enc << 25) 954 | (primary << 19) 955 | (src1_enc << 14); 956 957 uint index = src2_enc; 958 int disp = disp32; 959 960 if (src1_enc == R_SP_enc || src1_enc == R_FP_enc) { 961 disp += STACK_BIAS; 962 // Quick fix for JDK-8029668: check that stack offset fits, bailout if not 963 if (!Assembler::is_simm13(disp)) { 964 ra->C->record_method_not_compilable("unable to handle large constant offsets"); 965 return; 966 } 967 } 968 969 // We should have a compiler bailout here rather than a guarantee. 970 // Better yet would be some mechanism to handle variable-size matches correctly.
971 guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" ); 972 973 if( disp == 0 ) { 974 // use reg-reg form 975 // bit 13 is already zero 976 instr |= index; 977 } else { 978 // use reg-imm form 979 instr |= 0x00002000; // set bit 13 to one 980 instr |= disp & 0x1FFF; 981 } 982 983 cbuf.insts()->emit_int32(instr); 984
/* Under ASSERT, append the verify_oop checks planned above; if the load was
   detoured through O7, copy the result to its real destination afterwards. */
985 #ifdef ASSERT 986 { 987 MacroAssembler _masm(&cbuf); 988 if (is_verified_oop_base) { 989 __ verify_oop(reg_to_register_object(src1_enc)); 990 } 991 if (is_verified_oop_store) { 992 __ verify_oop(reg_to_register_object(dst_enc)); 993 } 994 if (tmp_enc != -1) { 995 __ mov(O7, reg_to_register_object(tmp_enc)); 996 } 997 if (is_verified_oop_load) { 998 __ verify_oop(reg_to_register_object(dst_enc)); 999 } 1000 } 1001 #endif 1002 } 1003
/* emit_call_reloc: emits a relocated call; the call must be the first
   instruction emitted here because PcDesc offsets are measured from it (see
   comment below). When preserve_g2 is set, G2 is parked in L7 via the delay
   slot and restored after the call. */
1004 void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) { 1005 // The method which records debug information at every safepoint 1006 // expects the call to be the first instruction in the snippet as 1007 // it creates a PcDesc structure which tracks the offset of a call 1008 // from the start of the codeBlob. This offset is computed as 1009 // code_end() - code_begin() of the code which has been emitted 1010 // so far. 1011 // In this particular case we have skirted around the problem by 1012 // putting the "mov" instruction in the delay slot but the problem 1013 // may bite us again at some other point and a cleaner/generic 1014 // solution using relocations would be needed. 1015 MacroAssembler _masm(&cbuf); 1016 __ set_inst_mark(); 1017 1018 // We flush the current window just so that there is a valid stack copy 1019 // the fact that the current window becomes active again instantly is 1020 // not a problem there is nothing live in it.
/* emit_call_reloc (cont.): the call itself, delay-slot handling of G2, and an
   ASSERT-only section that deliberately trashes the argument dump slots with
   poison values so stale data cannot masquerade as live oops. */
1021 1022 #ifdef ASSERT 1023 int startpos = __ offset(); 1024 #endif /* ASSERT */ 1025 1026 __ call((address)entry_point, rtype); 1027 1028 if (preserve_g2) __ delayed()->mov(G2, L7); 1029 else __ delayed()->nop(); 1030 1031 if (preserve_g2) __ mov(L7, G2); 1032 1033 #ifdef ASSERT 1034 if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) { 1035 #ifdef _LP64 1036 // Trash argument dump slots. 1037 __ set(0xb0b8ac0db0b8ac0d, G1); 1038 __ mov(G1, G5); 1039 __ stx(G1, SP, STACK_BIAS + 0x80); 1040 __ stx(G1, SP, STACK_BIAS + 0x88); 1041 __ stx(G1, SP, STACK_BIAS + 0x90); 1042 __ stx(G1, SP, STACK_BIAS + 0x98); 1043 __ stx(G1, SP, STACK_BIAS + 0xA0); 1044 __ stx(G1, SP, STACK_BIAS + 0xA8); 1045 #else // _LP64 1046 // this is also a native call, so smash the first 7 stack locations, 1047 // and the various registers 1048 1049 // Note: [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset], 1050 // while [SP+0x44..0x58] are the argument dump slots. 1051 __ set((intptr_t)0xbaadf00d, G1); 1052 __ mov(G1, G5); 1053 __ sllx(G1, 32, G1); 1054 __ or3(G1, G5, G1); 1055 __ mov(G1, G5); 1056 __ stx(G1, SP, 0x40); 1057 __ stx(G1, SP, 0x48); 1058 __ stx(G1, SP, 0x50); 1059 __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot 1060 #endif // _LP64 1061 } 1062 #endif /*ASSERT*/ 1063 } 1064
/* emit_lo / emit_hi: intentionally empty -- required ADL hooks with no work
   to do on SPARC. MachConstantBaseNode::_out_RegMask: the constant-table base
   may live in any pointer register.
   calculate_table_base_offset(): when RDPC is used the base offset is pinned
   to min_simm13 (see MachConstantBaseNode::emit); otherwise the base is
   placed mid-table (-size()/2) so both halves are reachable with simm13
   displacements, clamped to min_simm13 for very large tables. */
1065 //============================================================================= 1066 // REQUIRED FUNCTIONALITY for encoding 1067 void emit_lo(CodeBuffer &cbuf, int val) { } 1068 void emit_hi(CodeBuffer &cbuf, int val) { } 1069 1070 1071 //============================================================================= 1072 const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask(); 1073 1074 int Compile::ConstantTable::calculate_table_base_offset() const { 1075 if (UseRDPCForConstantTableBase) { 1076 // The table base offset might be less but then it fits into 1077 // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
1078 return Assembler::min_simm13(); 1079 } else { 1080 int offset = -(size() / 2); 1081 if (!Assembler::is_simm13(offset)) { 1082 offset = Assembler::min_simm13(); 1083 } 1084 return offset; 1085 } 1086 } 1087
/* MachConstantBaseNode: no post-allocation expansion on SPARC.
   emit(): materializes the constant-table base register either via RDPC plus
   an optional subtract (when the consts section directly precedes insts), or
   by SET-ing the absolute table address with an internal-word relocation. */
1088 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; } 1089 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) { 1090 ShouldNotReachHere(); 1091 } 1092 1093 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { 1094 Compile* C = ra_->C; 1095 Compile::ConstantTable& constant_table = C->constant_table(); 1096 MacroAssembler _masm(&cbuf); 1097 1098 Register r = as_Register(ra_->get_encode(this)); 1099 CodeSection* consts_section = __ code()->consts(); 1100 int consts_size = consts_section->align_at_start(consts_section->size()); 1101 assert(constant_table.size() == consts_size, "must be: %d == %d", constant_table.size(), consts_size); 1102 1103 if (UseRDPCForConstantTableBase) { 1104 // For the following RDPC logic to work correctly the consts 1105 // section must be allocated right before the insts section. This 1106 // assert checks for that. The layout and the SECT_* constants 1107 // are defined in src/share/vm/asm/codeBuffer.hpp. 1108 assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be"); 1109 int insts_offset = __ offset(); 1110 1111 // Layout: 1112 // 1113 // |----------- consts section ------------|----------- insts section -----------... 1114 // |------ constant table -----|- padding -|------------------x---- 1115 // \ current PC (RDPC instruction) 1116 // |<------------- consts_size ----------->|<- insts_offset ->| 1117 // \ table base 1118 // The table base offset is later added to the load displacement 1119 // so it has to be negative.
1120 int table_base_offset = -(consts_size + insts_offset); 1121 int disp; 1122 1123 // If the displacement from the current PC to the constant table 1124 // base fits into simm13 we set the constant table base to the 1125 // current PC. 1126 if (Assembler::is_simm13(table_base_offset)) { 1127 constant_table.set_table_base_offset(table_base_offset); 1128 disp = 0; 1129 } else { 1130 // Otherwise we set the constant table base offset to the 1131 // maximum negative displacement of load instructions to keep 1132 // the disp as small as possible: 1133 // 1134 // |<------------- consts_size ----------->|<- insts_offset ->| 1135 // |<--------- min_simm13 --------->|<-------- disp --------->| 1136 // \ table base 1137 table_base_offset = Assembler::min_simm13(); 1138 constant_table.set_table_base_offset(table_base_offset); 1139 disp = (consts_size + insts_offset) + table_base_offset; 1140 } 1141 1142 __ rdpc(r); 1143 1144 if (disp != 0) { 1145 assert(r != O7, "need temporary"); 1146 __ sub(r, __ ensure_simm13_or_reg(disp, O7), r); 1147 } 1148 } 1149 else { 1150 // Materialize the constant table base. 1151 address baseaddr = consts_section->start() + -(constant_table.table_base_offset()); 1152 RelocationHolder rspec = internal_word_Relocation::spec(baseaddr); 1153 AddressLiteral base(baseaddr, rspec); 1154 __ set(base, r); 1155 } 1156 } 1157
/* size(): conservative upper bound on the emitted instruction bytes (used for
   buffer sizing); format() (head): PRODUCT-excluded pretty-printer. */
1158 uint MachConstantBaseNode::size(PhaseRegAlloc*) const { 1159 if (UseRDPCForConstantTableBase) { 1160 // This is really the worst case but generally it's only 1 instruction. 1161 return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord; 1162 } else { 1163 return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord; 1164 } 1165 } 1166 1167 #ifndef PRODUCT 1168 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 1169 char reg[128]; 1170 ra_->dump_register(this, reg); 1171 if (UseRDPCForConstantTableBase) { 1172 st->print("RDPC %s\t!
/* MachConstantBaseNode::format (cont.). */
constant table base", reg); 1173 } else { 1174 st->print("SET &constanttable,%s\t! constant table base", reg); 1175 } 1176 } 1177 #endif 1178 1179
/* MachPrologNode::format: PRODUCT-excluded disassembly of the method prologue
   (optional NOPs, thread verification, stack-bang note, and the SAVE --
   either immediate form or SETHI/ADD into G3 for frames too large for
   simm13). Mirrors the code emitted by MachPrologNode::emit below. */
1180 //============================================================================= 1181 1182 #ifndef PRODUCT 1183 void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1184 Compile* C = ra_->C; 1185 1186 for (int i = 0; i < OptoPrologueNops; i++) { 1187 st->print_cr("NOP"); st->print("\t"); 1188 } 1189 1190 if( VerifyThread ) { 1191 st->print_cr("Verify_Thread"); st->print("\t"); 1192 } 1193 1194 size_t framesize = C->frame_size_in_bytes(); 1195 int bangsize = C->bang_size_in_bytes(); 1196 1197 // Calls to C2R adapters often do not accept exceptional returns. 1198 // We require that their callers must bang for them. But be careful, because 1199 // some VM calls (such as call site linkage) can use several kilobytes of 1200 // stack. But the stack safety zone should account for that. 1201 // See bugs 4446381, 4468289, 4497237. 1202 if (C->need_stack_bang(bangsize)) { 1203 st->print_cr("! stack bang (%d bytes)", bangsize); st->print("\t"); 1204 } 1205 1206 if (Assembler::is_simm13(-framesize)) { 1207 st->print ("SAVE R_SP,-" SIZE_FORMAT ",R_SP",framesize); 1208 } else { 1209 st->print_cr("SETHI R_SP,hi%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t"); 1210 st->print_cr("ADD R_G3,lo%%(-" SIZE_FORMAT "),R_G3",framesize); st->print("\t"); 1211 st->print ("SAVE R_SP,R_G3,R_SP"); 1212 } 1213 1214 } 1215 #endif 1216
/* MachPrologNode::emit: the real prologue -- optional NOPs, verify_thread,
   stack-overflow bang when required, the register-window SAVE (immediate or
   G3-composed for large frames), frame-complete bookkeeping, and eager
   initialization of the constant-table base offset when the table is
   addressed absolutely (non-RDPC) since constant users may be emitted before
   MachConstantBaseNode itself. */
1217 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1218 Compile* C = ra_->C; 1219 MacroAssembler _masm(&cbuf); 1220 1221 for (int i = 0; i < OptoPrologueNops; i++) { 1222 __ nop(); 1223 } 1224 1225 __ verify_thread(); 1226 1227 size_t framesize = C->frame_size_in_bytes(); 1228 assert(framesize >= 16*wordSize, "must have room for reg.
save area"); 1229 assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment"); 1230 int bangsize = C->bang_size_in_bytes(); 1231 1232 // Calls to C2R adapters often do not accept exceptional returns. 1233 // We require that their callers must bang for them. But be careful, because 1234 // some VM calls (such as call site linkage) can use several kilobytes of 1235 // stack. But the stack safety zone should account for that. 1236 // See bugs 4446381, 4468289, 4497237. 1237 if (C->need_stack_bang(bangsize)) { 1238 __ generate_stack_overflow_check(bangsize); 1239 } 1240 1241 if (Assembler::is_simm13(-framesize)) { 1242 __ save(SP, -framesize, SP); 1243 } else { 1244 __ sethi(-framesize & ~0x3ff, G3); 1245 __ add(G3, -framesize & 0x3ff, G3); 1246 __ save(SP, G3, SP); 1247 } 1248 C->set_frame_complete( __ offset() ); 1249 1250 if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) { 1251 // NOTE: We set the table base offset here because users might be 1252 // emitted before MachConstantBaseNode. 1253 Compile::ConstantTable& constant_table = C->constant_table(); 1254 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset()); 1255 } 1256 } 1257
/* size/reloc: generic size computation; reloc() returns a safe over-estimate
   of relocation entries. MachEpilogNode::format (head): PRODUCT-excluded
   pretty-printer for the epilogue (safepoint poll + RET/RESTORE). */
1258 uint MachPrologNode::size(PhaseRegAlloc *ra_) const { 1259 return MachNode::size(ra_); 1260 } 1261 1262 int MachPrologNode::reloc() const { 1263 return 10; // a large enough number 1264 } 1265 1266 //============================================================================= 1267 #ifndef PRODUCT 1268 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1269 Compile* C = ra_->C; 1270 1271 if(do_polling() && ra_->C->is_method_compilation()) { 1272 st->print("SETHI #PollAddr,L0\t!
/* MachEpilogNode::format (cont.). */
Load Polling address\n\t"); 1273 #ifdef _LP64 1274 st->print("LDX [L0],G0\t!Poll for Safepointing\n\t"); 1275 #else 1276 st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t"); 1277 #endif 1278 } 1279 1280 if(do_polling()) { 1281 if (UseCBCond && !ra_->C->is_method_compilation()) { 1282 st->print("NOP\n\t"); 1283 } 1284 st->print("RET\n\t"); 1285 } 1286 1287 st->print("RESTORE"); 1288 } 1289 #endif 1290
/* MachEpilogNode::emit: emits the return-polling load of the polling page
   (discarded into G0, with a poll_return relocation) for method compilations,
   then RET with RESTORE in the delay slot -- padded with a NOP when a cbcond
   may immediately precede, since a CTI cannot follow cbcond. */
1291 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1292 MacroAssembler _masm(&cbuf); 1293 Compile* C = ra_->C; 1294 1295 __ verify_thread(); 1296 1297 // If this does safepoint polling, then do it here 1298 if(do_polling() && ra_->C->is_method_compilation()) { 1299 AddressLiteral polling_page(os::get_polling_page()); 1300 __ sethi(polling_page, L0); 1301 __ relocate(relocInfo::poll_return_type); 1302 __ ld_ptr(L0, 0, G0); 1303 } 1304 1305 // If this is a return, then stuff the restore in the delay slot 1306 if(do_polling()) { 1307 if (UseCBCond && !ra_->C->is_method_compilation()) { 1308 // Insert extra padding for the case when the epilogue is preceded by 1309 // a cbcond jump, which can't be followed by a CTI instruction 1310 __ nop(); 1311 } 1312 __ ret(); 1313 __ delayed()->restore(); 1314 } else { 1315 __ restore(); 1316 } 1317 } 1318
/* size/reloc/pipeline: generic defaults; safepoint_offset() locates the poll
   instruction relative to the epilogue start (the sethi's length varies). */
1319 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const { 1320 return MachNode::size(ra_); 1321 } 1322 1323 int MachEpilogNode::reloc() const { 1324 return 16; // a large enough number 1325 } 1326 1327 const Pipeline * MachEpilogNode::pipeline() const { 1328 return MachNode::pipeline_class(); 1329 } 1330 1331 int MachEpilogNode::safepoint_offset() const { 1332 assert( do_polling(), "no return for this epilog node"); 1333 return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord; 1334 } 1335 1336 //============================================================================= 1337 1338 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack 1339 enum RC {
/* rc_class(): maps an allocator register name to its coarse class (invalid,
   integer, float, or stack slot) for spill-copy dispatch.
   impl_helper(): emits (or formats, in the non-PRODUCT branch) one SP-relative
   spill load/store via emit_form3_mem_reg, always accounting 4 bytes.
   impl_mov_helper() (head): emits/formats one arith-op register-register move
   used by spill copies. */
rc_bad, rc_int, rc_float, rc_stack }; 1340 static enum RC rc_class( OptoReg::Name reg ) { 1341 if( !OptoReg::is_valid(reg) ) return rc_bad; 1342 if (OptoReg::is_stack(reg)) return rc_stack; 1343 VMReg r = OptoReg::as_VMReg(reg); 1344 if (r->is_Register()) return rc_int; 1345 assert(r->is_FloatRegister(), "must be"); 1346 return rc_float; 1347 } 1348 1349 static int impl_helper(const MachNode* mach, CodeBuffer* cbuf, PhaseRegAlloc* ra, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) { 1350 if (cbuf) { 1351 emit_form3_mem_reg(*cbuf, ra, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]); 1352 } 1353 #ifndef PRODUCT 1354 else if (!do_size) { 1355 if (size != 0) st->print("\n\t"); 1356 if (is_load) st->print("%s [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg)); 1357 else st->print("%s R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset); 1358 } 1359 #endif 1360 return size+4; 1361 } 1362 1363 static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) { 1364 if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] ); 1365 #ifndef PRODUCT 1366 else if( !do_size ) { 1367 if( size != 0 ) st->print("\n\t"); 1368 st->print("%s R_%s,R_%s\t!
spill",op_str,OptoReg::regname(src),OptoReg::regname(dst)); 1369 } 1370 #endif 1371 return size+4; 1372 } 1373 1374 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, 1375 PhaseRegAlloc *ra_, 1376 bool do_size, 1377 outputStream* st ) const { 1378 // Get registers to move 1379 OptoReg::Name src_second = ra_->get_reg_second(in(1)); 1380 OptoReg::Name src_first = ra_->get_reg_first(in(1)); 1381 OptoReg::Name dst_second = ra_->get_reg_second(this ); 1382 OptoReg::Name dst_first = ra_->get_reg_first(this ); 1383 1384 enum RC src_second_rc = rc_class(src_second); 1385 enum RC src_first_rc = rc_class(src_first); 1386 enum RC dst_second_rc = rc_class(dst_second); 1387 enum RC dst_first_rc = rc_class(dst_first); 1388 1389 assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" ); 1390 1391 // Generate spill code! 1392 int size = 0; 1393 1394 if( src_first == dst_first && src_second == dst_second ) 1395 return size; // Self copy, no move 1396 1397 // -------------------------------------- 1398 // Check for mem-mem move. Load into unused float registers and fall into 1399 // the float-store case. 
1400 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) { 1401 int offset = ra_->reg2offset(src_first); 1402 // Further check for aligned-adjacent pair, so we can use a double load 1403 if( (src_first&1)==0 && src_first+1 == src_second ) { 1404 src_second = OptoReg::Name(R_F31_num); 1405 src_second_rc = rc_float; 1406 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st); 1407 } else { 1408 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st); 1409 } 1410 src_first = OptoReg::Name(R_F30_num); 1411 src_first_rc = rc_float; 1412 } 1413 1414 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) { 1415 int offset = ra_->reg2offset(src_second); 1416 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st); 1417 src_second = OptoReg::Name(R_F31_num); 1418 src_second_rc = rc_float; 1419 } 1420 1421 // -------------------------------------- 1422 // Check for float->int copy; requires a trip through memory 1423 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) { 1424 int offset = frame::register_save_words*wordSize; 1425 if (cbuf) { 1426 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 ); 1427 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1428 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1429 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 ); 1430 } 1431 #ifndef PRODUCT 1432 else if (!do_size) { 1433 if (size != 0) st->print("\n\t"); 1434 st->print( "SUB R_SP,16,R_SP\n"); 1435 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1436 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1437 st->print("\tADD R_SP,16,R_SP\n"); 1438 } 1439 #endif 1440 size += 
16; 1441 } 1442 1443 // Check for float->int copy on T4 1444 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) { 1445 // Further check for aligned-adjacent pair, so we can use a double move 1446 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1447 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st); 1448 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st); 1449 } 1450 // Check for int->float copy on T4 1451 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) { 1452 // Further check for aligned-adjacent pair, so we can use a double move 1453 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second) 1454 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st); 1455 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st); 1456 } 1457 1458 // -------------------------------------- 1459 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations. 1460 // In such cases, I have to do the big-endian swap. For aligned targets, the 1461 // hardware does the flop for me. Doubles are always aligned, so no problem 1462 // there. Misaligned sources only come from native-long-returns (handled 1463 // special below). 1464 #ifndef _LP64 1465 if( src_first_rc == rc_int && // source is already big-endian 1466 src_second_rc != rc_bad && // 64-bit move 1467 ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst 1468 assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" ); 1469 // Do the big-endian flop. 
1470 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ; 1471 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc; 1472 } 1473 #endif 1474 1475 // -------------------------------------- 1476 // Check for integer reg-reg copy 1477 if( src_first_rc == rc_int && dst_first_rc == rc_int ) { 1478 #ifndef _LP64 1479 if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case 1480 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1481 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1482 // operand contains the least significant word of the 64-bit value and vice versa. 1483 OptoReg::Name tmp = OptoReg::Name(R_O7_num); 1484 assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" ); 1485 // Shift O0 left in-place, zero-extend O1, then OR them into the dst 1486 if( cbuf ) { 1487 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 ); 1488 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 ); 1489 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] ); 1490 #ifndef PRODUCT 1491 } else if( !do_size ) { 1492 if( size != 0 ) st->print("\n\t"); 1493 st->print("SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp)); 1494 st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second)); 1495 st->print("OR R_%s,R_%s,R_%s\t! 
spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first)); 1496 #endif 1497 } 1498 return size+12; 1499 } 1500 else if( dst_first == R_I0_num && dst_second == R_I1_num ) { 1501 // returning a long value in I0/I1 1502 // a SpillCopy must be able to target a return instruction's reg_class 1503 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value 1504 // as stored in memory. On a big-endian machine like SPARC, this means that the _second 1505 // operand contains the least significant word of the 64-bit value and vice versa. 1506 OptoReg::Name tdest = dst_first; 1507 1508 if (src_first == dst_first) { 1509 tdest = OptoReg::Name(R_O7_num); 1510 size += 4; 1511 } 1512 1513 if( cbuf ) { 1514 assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg"); 1515 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1 1516 // ShrL_reg_imm6 1517 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 ); 1518 // ShrR_reg_imm6 src, 0, dst 1519 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 ); 1520 if (tdest != dst_first) { 1521 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] ); 1522 } 1523 } 1524 #ifndef PRODUCT 1525 else if( !do_size ) { 1526 if( size != 0 ) st->print("\n\t"); // %%%%% !!!!! 1527 st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest)); 1528 st->print("SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second)); 1529 if (tdest != dst_first) { 1530 st->print("MOV R_%s,R_%s\t! 
spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first)); 1531 } 1532 } 1533 #endif // PRODUCT 1534 return size+8; 1535 } 1536 #endif // !_LP64 1537 // Else normal reg-reg copy 1538 assert( src_second != dst_first, "smashed second before evacuating it" ); 1539 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st); 1540 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" ); 1541 // This moves an aligned adjacent pair. 1542 // See if we are done. 1543 if( src_first+1 == src_second && dst_first+1 == dst_second ) 1544 return size; 1545 } 1546 1547 // Check for integer store 1548 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) { 1549 int offset = ra_->reg2offset(dst_first); 1550 // Further check for aligned-adjacent pair, so we can use a double store 1551 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1552 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st); 1553 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st); 1554 } 1555 1556 // Check for integer load 1557 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) { 1558 int offset = ra_->reg2offset(src_first); 1559 // Further check for aligned-adjacent pair, so we can use a double load 1560 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1561 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st); 1562 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st); 1563 } 1564 1565 // Check for float reg-reg copy 1566 if( src_first_rc == rc_float && dst_first_rc == rc_float ) { 1567 // Further check for aligned-adjacent pair, so we can use a double move 1568 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && 
dst_first+1 == dst_second ) 1569 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st); 1570 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st); 1571 } 1572 1573 // Check for float store 1574 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) { 1575 int offset = ra_->reg2offset(dst_first); 1576 // Further check for aligned-adjacent pair, so we can use a double store 1577 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1578 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st); 1579 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st); 1580 } 1581 1582 // Check for float load 1583 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) { 1584 int offset = ra_->reg2offset(src_first); 1585 // Further check for aligned-adjacent pair, so we can use a double load 1586 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second ) 1587 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st); 1588 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st); 1589 } 1590 1591 // -------------------------------------------------------------------- 1592 // Check for hi bits still needing moving. Only happens for misaligned 1593 // arguments to native calls. 1594 if( src_second == dst_second ) 1595 return size; // Self copy; no move 1596 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" ); 1597 1598 #ifndef _LP64 1599 // In the LP64 build, all registers can be moved as aligned/adjacent 1600 // pairs, so there's never any need to move the high bits separately. 
1601 // The 32-bit builds have to deal with the 32-bit ABI which can force 1602 // all sorts of silly alignment problems. 1603 1604 // Check for integer reg-reg copy. Hi bits are stuck up in the top 1605 // 32-bits of a 64-bit register, but are needed in low bits of another 1606 // register (else it's a hi-bits-to-hi-bits copy which should have 1607 // happened already as part of a 64-bit move) 1608 if( src_second_rc == rc_int && dst_second_rc == rc_int ) { 1609 assert( (src_second&1)==1, "its the evil O0/O1 native return case" ); 1610 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" ); 1611 // Shift src_second down to dst_second's low bits. 1612 if( cbuf ) { 1613 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1614 #ifndef PRODUCT 1615 } else if( !do_size ) { 1616 if( size != 0 ) st->print("\n\t"); 1617 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second)); 1618 #endif 1619 } 1620 return size+4; 1621 } 1622 1623 // Check for high word integer store. Must down-shift the hi bits 1624 // into a temp register, then fall into the case of storing int bits. 1625 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) { 1626 // Shift src_second down to dst_second's low bits. 1627 if( cbuf ) { 1628 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 ); 1629 #ifndef PRODUCT 1630 } else if( !do_size ) { 1631 if( size != 0 ) st->print("\n\t"); 1632 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num)); 1633 #endif 1634 } 1635 size+=4; 1636 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num! 
1637 } 1638 1639 // Check for high word integer load 1640 if( dst_second_rc == rc_int && src_second_rc == rc_stack ) 1641 return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st); 1642 1643 // Check for high word integer store 1644 if( src_second_rc == rc_int && dst_second_rc == rc_stack ) 1645 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st); 1646 1647 // Check for high word float store 1648 if( src_second_rc == rc_float && dst_second_rc == rc_stack ) 1649 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st); 1650 1651 #endif // !_LP64 1652 1653 Unimplemented(); 1654 } 1655 1656 #ifndef PRODUCT 1657 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1658 implementation( NULL, ra_, false, st ); 1659 } 1660 #endif 1661 1662 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1663 implementation( &cbuf, ra_, false, NULL ); 1664 } 1665 1666 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const { 1667 return implementation( NULL, ra_, true, NULL ); 1668 } 1669 1670 //============================================================================= 1671 #ifndef PRODUCT 1672 void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const { 1673 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count); 1674 } 1675 #endif 1676 1677 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const { 1678 MacroAssembler _masm(&cbuf); 1679 for(int i = 0; i < _count; i += 1) { 1680 __ nop(); 1681 } 1682 } 1683 1684 uint MachNopNode::size(PhaseRegAlloc *ra_) const { 1685 return 4 * _count; 1686 } 1687 1688 1689 //============================================================================= 1690 #ifndef PRODUCT 1691 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 1692 int offset = 
ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
}
#endif

// Materialize the address of this BoxLock's stack slot into its assigned
// register: dst = SP + (slot offset + STACK_BIAS).  When the biased offset
// does not fit in a signed 13-bit immediate, it is first built in O7.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm13(offset)) {
    // Single ADD: offset fits in the simm13 field.
    __ add(SP, offset, reg_to_register_object(reg));
  } else {
    // Offset too large for an immediate: build it in O7, then add.
    __ set(offset, O7);
    __ add(SP, O7, reg_to_register_object(reg));
  }
}

// Size is data-dependent (1 or 2+ instructions, see emit above), so it is
// measured by a scratch emission rather than computed statically.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  return ra_->C->scratch_emit_size(this);
}

//=============================================================================
#ifndef PRODUCT
// Debug-only pretty printer for the Unverified Entry Point (inline-cache
// check).  Mirrors the instruction sequence produced by MachUEPNode::emit.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
#ifdef _LP64
  if (UseCompressedClassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // Compressed-klass path: load the narrow klass, then decode it
    // (optional shift and base add) before the compare.
    st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    if (Universe::narrow_klass_base() != 0) {
      st->print_cr("\tSET Universe::narrow_klass_base,R_G6_heap_base");
      if (Universe::narrow_klass_shift() != 0) {
        st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
      }
      st->print_cr("\tADD R_G5,R_G6_heap_base,R_G5");
      // Restore G6 to the narrow-oop base after borrowing it for decoding.
      st->print_cr("\tSET Universe::narrow_ptrs_base,R_G6_heap_base");
    } else {
      st->print_cr("\tSLL R_G5,Universe::narrow_klass_shift,R_G5");
    }
  } else {
    st->print_cr("\tLDX [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else // _LP64
  st->print_cr("\tLDUW [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  st->print_cr("\tCMP R_G5,R_G3" );
  st->print ("\tTne icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif // _LP64
}
#endif

// Emit the inline-cache check at the Unverified Entry Point: load the
// receiver's klass (O0), compare against the expected klass in the
// inline-cache register, and trap to the IC-miss handler on mismatch.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg = G3;
  assert( G5_ic_reg != temp_reg, "conflicting registers" );

  // Load klass from receiver
  __ load_klass(O0, temp_reg);
  // Compare against expected klass
  __ cmp(temp_reg, G5_ic_reg);
  // Branch to miss code, checks xcc or icc depending
  __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}


//=============================================================================


// Emit exception handler code.
1771 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) { 1772 Register temp_reg = G3; 1773 AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point()); 1774 MacroAssembler _masm(&cbuf); 1775 1776 address base = __ start_a_stub(size_exception_handler()); 1777 if (base == NULL) { 1778 ciEnv::current()->record_failure("CodeCache is full"); 1779 return 0; // CodeBuffer::expand failed 1780 } 1781 1782 int offset = __ offset(); 1783 1784 __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp 1785 __ delayed()->nop(); 1786 1787 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); 1788 1789 __ end_a_stub(); 1790 1791 return offset; 1792 } 1793 1794 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) { 1795 // Can't use any of the current frame's registers as we may have deopted 1796 // at a poll and everything (including G3) can be live. 1797 Register temp_reg = L0; 1798 AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack()); 1799 MacroAssembler _masm(&cbuf); 1800 1801 address base = __ start_a_stub(size_deopt_handler()); 1802 if (base == NULL) { 1803 ciEnv::current()->record_failure("CodeCache is full"); 1804 return 0; // CodeBuffer::expand failed 1805 } 1806 1807 int offset = __ offset(); 1808 __ save_frame(0); 1809 __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp 1810 __ delayed()->restore(); 1811 1812 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); 1813 1814 __ end_a_stub(); 1815 return offset; 1816 1817 } 1818 1819 // Given a register encoding, produce a Integer Register object 1820 static Register reg_to_register_object(int register_encoding) { 1821 assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding"); 1822 return as_Register(register_encoding); 1823 } 1824 1825 // Given a register encoding, produce a single-precision Float Register object 1826 static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) { 1827 
assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding"); 1828 return as_SingleFloatRegister(register_encoding); 1829 } 1830 1831 // Given a register encoding, produce a double-precision Float Register object 1832 static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) { 1833 assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding"); 1834 assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding"); 1835 return as_DoubleFloatRegister(register_encoding); 1836 } 1837 1838 const bool Matcher::match_rule_supported(int opcode) { 1839 if (!has_match_rule(opcode)) 1840 return false; 1841 1842 switch (opcode) { 1843 case Op_CountLeadingZerosI: 1844 case Op_CountLeadingZerosL: 1845 case Op_CountTrailingZerosI: 1846 case Op_CountTrailingZerosL: 1847 case Op_PopCountI: 1848 case Op_PopCountL: 1849 if (!UsePopCountInstruction) 1850 return false; 1851 case Op_CompareAndSwapL: 1852 #ifdef _LP64 1853 case Op_CompareAndSwapP: 1854 #endif 1855 if (!VM_Version::supports_cx8()) 1856 return false; 1857 break; 1858 } 1859 1860 return true; // Per default match rules are supported. 1861 } 1862 1863 const bool Matcher::match_rule_supported_vector(int opcode, int vlen) { 1864 1865 // TODO 1866 // identify extra cases that we might want to provide match rules for 1867 // e.g. Op_ vector nodes and other intrinsics while guarding with vlen 1868 if (!has_match_rule(opcode)) { 1869 return false; 1870 } 1871 1872 bool ret_value = match_rule_supported(opcode); 1873 // Add rules here. 1874 1875 return ret_value; // Per default match rules are supported. 
1876 } 1877 1878 const int Matcher::float_pressure(int default_pressure_threshold) { 1879 return default_pressure_threshold; 1880 } 1881 1882 int Matcher::regnum_to_fpu_offset(int regnum) { 1883 return regnum - 32; // The FP registers are in the second chunk 1884 } 1885 1886 #ifdef ASSERT 1887 address last_rethrow = NULL; // debugging aid for Rethrow encoding 1888 #endif 1889 1890 // Vector width in bytes 1891 const int Matcher::vector_width_in_bytes(BasicType bt) { 1892 assert(MaxVectorSize == 8, ""); 1893 return 8; 1894 } 1895 1896 // Vector ideal reg 1897 const int Matcher::vector_ideal_reg(int size) { 1898 assert(MaxVectorSize == 8, ""); 1899 return Op_RegD; 1900 } 1901 1902 const int Matcher::vector_shift_count_ideal_reg(int size) { 1903 fatal("vector shift is not supported"); 1904 return Node::NotAMachineReg; 1905 } 1906 1907 // Limits on vector size (number of elements) loaded into vector. 1908 const int Matcher::max_vector_size(const BasicType bt) { 1909 assert(is_java_primitive(bt), "only primitive type vectors"); 1910 return vector_width_in_bytes(bt)/type2aelembytes(bt); 1911 } 1912 1913 const int Matcher::min_vector_size(const BasicType bt) { 1914 return max_vector_size(bt); // Same as max. 1915 } 1916 1917 // SPARC doesn't support misaligned vectors store/load. 1918 const bool Matcher::misaligned_vectors_ok() { 1919 return false; 1920 } 1921 1922 // Current (2013) SPARC platforms need to read original key 1923 // to construct decryption expanded key 1924 const bool Matcher::pass_original_key_for_aes() { 1925 return true; 1926 } 1927 1928 // USII supports fxtof through the whole range of number, USIII doesn't 1929 const bool Matcher::convL2FSupported(void) { 1930 return VM_Version::has_fast_fxtof(); 1931 } 1932 1933 // Is this branch offset short enough that a short branch can be used? 1934 // 1935 // NOTE: If the platform does not provide any short branch variants, then 1936 // this method should return false for offset 0. 
// A CBcond branch reaches +/- 2K (simm12 of instruction words via simm12
// displacement); only usable when the CPU supports CBCOND at all.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // The passed offset is relative to address of the branch.
  // Don't need to adjust the offset.
  return UseCBCond && Assembler::is_simm12(offset);
}

// True when materializing the 64-bit constant takes no more work than
// materializing two 32-bit halves (one half zero, or high half is a
// sign-extension of the low).
const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  // Depends on optimizations in MacroAssembler::setx.
  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  return (hi == 0) || (hi == -1) || (lo == 0);
}

// No scaling for the parameter of the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// CMOVF/CMOVD are expensive on T4 and on SPARC64.
const int Matcher::float_cmove_cost() {
  return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
}

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif

// No-op on SPARC.
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved.  In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif

// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Standard sparc 6 args in registers
  if( reg == R_I0_num ||
      reg == R_I1_num ||
      reg == R_I2_num ||
      reg == R_I3_num ||
      reg == R_I4_num ||
      reg == R_I5_num ) return true;
#ifdef _LP64
  // 64-bit builds can pass 64-bit pointers and longs in
  // the high I registers
  if( reg == R_I0H_num ||
      reg == R_I1H_num ||
      reg == R_I2H_num ||
      reg == R_I3H_num ||
      reg == R_I4H_num ||
      reg == R_I5H_num ) return true;

  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
    return true;
  }

#else
  // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
  // Longs cannot be passed in O regs, because O regs become I regs
  // after a 'save' and I regs get their high bits chopped off on
  // interrupt.
  if( reg == R_G1H_num || reg == R_G1_num ) return true;
  if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
  // A few float args in registers
  if( reg >= R_F0_num && reg <= R_F7_num ) return true;

  return false;
}

bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  // Use hardware SDIVX instruction when it is
  // faster than code which uses multiply.
  return VM_Version::has_fast_idiv();
}

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return L7_REGP_mask();
}

%}


// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp.  This lets us avoid many, many other ifdefs.)
#ifdef _LP64
#define immX      immL
#define immX13    immL13
#define immX13m7  immL13m7
#define iRegX     iRegL
#define g1RegX    g1RegL
#else
#define immX      immI
#define immX13    immI13
#define immX13m7  immI13m7
#define iRegX     iRegI
#define g1RegX    g1RegI
#endif

//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
// byte streams.  Encoding classes are parameterized macros used by
// Machine Instruction Nodes in order to generate the bit encoding of the
// instruction.  Operands specify their base encoding interface with the
// interface keyword.  There are currently supported four interfaces,
// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
// operand to generate a function which returns its register number when
// queried.   CONST_INTER causes an operand to generate a function which
// returns the value of the constant when queried.  MEMORY_INTER causes an
// operand to generate four functions which return the Base Register, the
// Index Register, the Scale Value, and the Offset Value of the operand when
// queried.  COND_INTER causes an operand to generate six functions which
// return the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional instruction.
//
// Instructions specify two basic values for encoding.  Again, a function
// is available to check if the constant displacement is an oop. They use the
// ins_encode keyword to specify their encoding classes (which must be
// a sequence of enc_class names, and their parameters, specified in
// the encoding block), and they use the
// opcode keyword to specify, in order, their primary, secondary, and
// tertiary opcode.  Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
encode %{
  // Debug-only marker for instructions whose encoding has never been
  // exercised; fires an "untested" warning in ASSERT builds.
  enc_class enc_untested %{
#ifdef ASSERT
    MacroAssembler _masm(&cbuf);
    __ untested("encoding");
#endif
  %}

  // Memory-form load/store using both primary and tertiary opcode fields.
  enc_class form3_mem_reg( memory mem, iRegI dst ) %{
    emit_form3_mem_reg(cbuf, ra_, this, $primary, $tertiary,
                       $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
  %}

  // Memory-form load/store using only the primary opcode (no tertiary).
  enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
  %}

  enc_class form3_mem_prefetch_read( memory mem ) %{
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
  %}

  enc_class form3_mem_prefetch_write( memory mem ) %{
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/);
  %}

  // Load a misaligned 64-bit value as two 32-bit word accesses at disp and
  // disp+4, then marshal them into one register: shift the first word up
  // 32 bits and OR in the second (loaded via scratch O7).
  enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{
    assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
    guarantee($mem$$index == R_G0_enc, "double index?");
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp, R_G0_enc, $reg$$reg );
    // 0x1020 sets the immediate-form bit plus the 32-bit shift count
    // (sllx reg,32,reg), then OR in the low word from O7.
    emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 );
    emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc );
  %}

  // Access a misaligned double as two single-word accesses into the
  // even/odd halves of a double FP register pair.
  enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{
    assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
    guarantee($mem$$index == R_G0_enc, "double index?");
    // Load long with 2 instructions
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg+0 );
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
  %}

  //%%% form3_mem_plus_4_reg is a hack--get rid of it
  enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{
    guarantee($mem$$disp, "cannot offset a reg-reg operand by 4");
    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
  %}

  // Reg-reg move via "or rd, %g0, rs2"; elided when src == dst.
  enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( $rs2$$reg != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg );
  %}

  // Target lo half of long
  enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( $rs2$$reg != LONG_LO_REG($rd$$reg) )
      emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg );
  %}

  // Source lo half of long
  enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( LONG_LO_REG($rs2$$reg) != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) );
  %}

  // Target hi half of long: fill with the sign of rs1 (sra rs1,31,rd).
  enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 );
  %}

  // Source lo half of long, and leave it sign extended.
  enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{
    // Sign extend low half
    emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 );
  %}

  // Source hi half of long, and leave it sign extended.
// Source hi half of long: SRLX rs1,32,rd — shift the upper 32 bits of the
// 64-bit rs1 down into the 32-bit rd.
enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{
  // Shift high half to low half
  emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 );
%}

// Source hi half of long
enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{
  // Encode a reg-reg copy. If it is useless, then empty encoding.
  // (OR %g0,rs2_hi,rd is the canonical register move.)
  if( LONG_HI_REG($rs2$$reg) != $rd$$reg )
    emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) );
%}

// Generic three-register form; the op/op3 fields come from the matching
// instruct's $primary/$secondary opcode attributes.
enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg );
%}

// dst = (src != 0) ? 1 : 0.  SUBcc %g0,src sets carry exactly when src is
// non-zero; ADDC %g0,0,dst then materializes the carry bit into dst.
enc_class enc_to_bool( iRegI src, iRegI dst ) %{
  emit3       ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg );
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 );
%}

// dst = (p < q) ? -1 : 0, using a signed compare on icc and an annulled
// branch over the "mov dst,-1" in the delay slot.
enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{
  emit3       ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg );
  // clear if nothing else is happening
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 );
  // blt,a,pn done
  emit2_19    ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 );
  // mov dst,-1 in delay slot
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
%}

// 32-bit shift by a 5-bit immediate (mask keeps the count in range).
enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
%}

// 64-bit shift by a 6-bit immediate; the 0x1000 bit presumably selects the
// extended (X) 64-bit shift form — confirm against emit3_simm13.
enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
%}

// 64-bit shift by register; 0x80 in the opf field presumably selects the
// extended (X) form — confirm against emit3.
enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
%}

// Generic reg + simm13 -> reg arithmetic form.
enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
  emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
%}

// O1 = O7 + pc_return_offset: copy the adjusted return PC into O1.
enc_class move_return_pc_to_o1() %{
  emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
%}

#ifdef _LP64
/* %%% merge with enc_to_bool */
// Pointer-to-boolean: MOVR writes 1 into dst when src is non-zero and
// leaves dst unchanged otherwise (caller presumably pre-clears dst —
// confirm at the instruct that uses this).
enc_class enc_convP2B( iRegI dst, iRegP src ) %{
  MacroAssembler _masm(&cbuf);

  Register   src_reg = reg_to_register_object($src$$reg);
  Register   dst_reg = reg_to_register_object($dst$$reg);
  __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
%}
#endif

enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
  // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
  // subcc computes p-q and sets icc; add speculatively computes (p-q)+y;
  // movcc keeps the +y result only when p < q.
  MacroAssembler _masm(&cbuf);

  Register   p_reg = reg_to_register_object($p$$reg);
  Register   q_reg = reg_to_register_object($q$$reg);
  Register   y_reg = reg_to_register_object($y$$reg);
  Register tmp_reg = reg_to_register_object($tmp$$reg);

  __ subcc( p_reg, q_reg,  p_reg );
  __ add  ( p_reg, y_reg, tmp_reg );
  __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg );
%}

// double -> int conversion with the NaN case forced to 0: compare src to
// itself (ordered fails only for NaN), branch over the fixup when ordered.
enc_class form_d2i_helper(regD src, regF dst) %{
  // fcmp %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fdtoi $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg );
  // fitos $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
  // clear $dst (if nan): dst - dst == 0
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
  // carry on here...
%}

// double -> long conversion with the NaN case forced to 0 (same branch
// shape as form_d2i_helper, using the 64-bit fdtox/fxtod/fsubd opcodes).
enc_class form_d2l_helper(regD src, regD dst) %{
  // fcmp %fcc0,$src,$src  check for NAN
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fdtox $src,$dst   convert in delay slot
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg );
  // fxtod $dst,$dst  (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
  // carry on here...
%}

// float -> int conversion with the NaN case forced to 0.
enc_class form_f2i_helper(regF src, regF dst) %{
  // fcmps %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fstoi $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg );
  // fitos $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
  // carry on here...
%}

// float -> long conversion with the NaN case forced to 0.
enc_class form_f2l_helper(regF src, regD dst) %{
  // fcmps %fcc0,$src,$src
  emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
  // branch %fcc0 not-nan, predict taken
  emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
  // fstox $src,$dst
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg );
  // fxtod $dst,$dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
  // clear $dst (if nan)
  emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
  // carry on here...
%}

// Single-source FP-op forms: rs1 field is 0; op/op3/opf come from the
// instruct's $primary/$secondary/$tertiary opcode attributes.
enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}

// rs2+1 addresses the low (odd) half of the double-register pair.
enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %}

enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %}

// Two-source FP-op forms (add/sub/mul/div style).
enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, regD rd ) %{
  emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// FP compares: the destination field carries the condition-code register.
enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{
  emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{
  emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
%}

// int -> float conversion; the specific opf comes from $secondary.
enc_class form3_convI2F(regF rs2, regF rd) %{
  emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg);
%}

// Encoding class for traceable jumps
enc_class form_jmpl(g3RegP dest) %{
  emit_jmpl(cbuf, $dest$$reg);
%}

enc_class form_jmpl_set_exception_pc(g1RegP dest) %{
  emit_jmpl_set_exception_pc(cbuf, $dest$$reg);
%}

enc_class form2_nop() %{
  emit_nop(cbuf);
%}

enc_class form2_illtrap() %{
  emit_illtrap(cbuf);
%}


// Compare longs and convert into -1, 0, 1.
// dst = sign(src1 - src2) in {-1, 0, 1}, comparing as 64-bit (xcc).
// Branch displacements (5 and 3) are in instructions, counted from the
// branch itself — keep them in sync if instructions are added/removed.
enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{
  // CMP $src1,$src2
  emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg );
  // blt,a,pn done
  emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 );
  // mov dst,-1 in delay slot
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
  // bgt,a,pn done
  emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 );
  // mov dst,1 in delay slot
  emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 );
  // CLR $dst
  emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 );
%}

// Call the shared partial-subtype-check stub (runtime call reloc);
// the delay slot is filled with a nop.
enc_class enc_PartialSubtypeCheck() %{
  MacroAssembler _masm(&cbuf);
  __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type);
  __ delayed()->nop();
%}

// Conditional branch on icc; backward branches are predicted taken,
// forward branches predicted not-taken.
enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
  MacroAssembler _masm(&cbuf);
  Label* L = $labl$$label;
  Assembler::Predict predict_taken =
    cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

  __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
  __ delayed()->nop();
%}

// Branch on register condition (BPr) — tests op1 directly, no flags.
enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
  MacroAssembler _masm(&cbuf);
  Label* L = $labl$$label;
  Assembler::Predict predict_taken =
    cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;

  __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
  __ delayed()->nop();
%}

// MOVcc, register source, integer condition codes.  pcc supplies the
// cc1:cc0 bits selecting 'icc' vs 'xcc'.
enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (1 << 18) |                // cc2 bit for 'icc'
           ($cmp$$cmpcode << 14) |
           (0 << 13) |                // select register move
           ($pcc$$constant << 11) |   // cc1, cc0 bits for 'icc' or 'xcc'
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// MOVcc, 11-bit signed immediate source, integer condition codes.
enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
  int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (1 << 18) |                // cc2 bit for 'icc'
           ($cmp$$cmpcode << 14) |
           (1 << 13) |                // select immediate move
           ($pcc$$constant << 11) |   // cc1, cc0 bits for 'icc'
           (simm11 << 0);
  cbuf.insts()->emit_int32(op);
%}

// MOVcc, register source, conditioned on a floating-point cc register.
enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (0 << 18) |                // cc2 bit for 'fccX'
           ($cmp$$cmpcode << 14) |
           (0 << 13) |                // select register move
           ($fcc$$reg << 11) |        // cc1, cc0 bits for fcc0-fcc3
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// MOVcc, 11-bit immediate source, conditioned on a floating-point cc register.
enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
  int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (0 << 18) |                // cc2 bit for 'fccX'
           ($cmp$$cmpcode << 14) |
           (1 << 13) |                // select immediate move
           ($fcc$$reg << 11) |        // cc1, cc0 bits for fcc0-fcc3
           (simm11 << 0);
  cbuf.insts()->emit_int32(op);
%}
// FMOVcc on integer condition codes: conditionally move an FP register.
// $primary selects the single/double/quad variant of the opf field.
enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::fpop2_op3 << 19) |
           (0 << 18) |
           ($cmp$$cmpcode << 14) |
           (1 << 13) |                // select register move
           ($pcc$$constant << 11) |   // cc1-cc0 bits for 'icc' or 'xcc'
           ($primary << 5) |          // select single, double or quad
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// FMOVcc on a floating-point condition-code register (fcc0-fcc3).
enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::fpop2_op3 << 19) |
           (0 << 18) |
           ($cmp$$cmpcode << 14) |
           ($fcc$$reg << 11) |        // cc2-cc0 bits for 'fccX'
           ($primary << 5) |          // select single, double or quad
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// Used by the MIN/MAX encodings.  Same as a CMOV, but
// the condition comes from opcode-field instead of an argument.
// MIN/MAX conditional move on 32-bit icc; the condition field comes from
// the instruct's $primary opcode attribute rather than a cmpOp operand.
enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (1 << 18) |                // cc2 bit for 'icc'
           ($primary << 14) |
           (0 << 13) |                // select register move
           (0 << 11) |                // cc1, cc0 bits for 'icc'
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// 64-bit MIN/MAX conditional move on xcc.
enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{
  int op = (Assembler::arith_op << 30) |
           ($dst$$reg << 25) |
           (Assembler::movcc_op3 << 19) |
           (6 << 16) |                // cc2 bit for 'xcc'
           ($primary << 14) |
           (0 << 13) |                // select register move
           (0 << 11) |                // cc1, cc0 bits for 'icc'
           ($src$$reg << 0);
  cbuf.insts()->emit_int32(op);
%}

// rd = simm13 constant, via OR %g0,imm,rd.
enc_class Set13( immI13 src, iRegI rd ) %{
  emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant );
%}

// rd = high 22 bits of the constant, via SETHI.
enc_class SetHi22( immI src, iRegI rd ) %{
  emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant );
%}

// rd = arbitrary 32-bit constant; the assembler picks the shortest
// sethi/or sequence.
enc_class Set32( immI src, iRegI rd ) %{
  MacroAssembler _masm(&cbuf);
  __ set($src$$constant, reg_to_register_object($rd$$reg));
%}

// Debug-only stack sanity check after calls: verify SP + framesize == FP,
// trapping if not.  Emits nothing unless VerifyStackAtCalls is set.
enc_class call_epilog %{
  if( VerifyStackAtCalls ) {
    MacroAssembler _masm(&cbuf);
    int framesize = ra_->C->frame_size_in_bytes();
    Register temp_reg = G3;   // scratch; clobbered by the check
    __ add(SP, framesize, temp_reg);
    __ cmp(temp_reg, FP);
    __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
  }
%}

// Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
// to G1 so the register allocator will not have to deal with the misaligned register
// pair.
// 32-bit VM only: merge the O0:O1 long return value into G1
// (G1 = (O0 << 32) | zero-extended O1).  No-op in the 64-bit VM.
enc_class adjust_long_from_native_call %{
#ifndef _LP64
  if (returns_long()) {
    // sllx O0,32,O0
    emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
    // srl O1,0,O1
    emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
    // or O0,O1,G1
    emit3       ( cbuf, Assembler::arith_op, R_G1_enc, Assembler::or_op3, R_O0_enc, 0, R_O1_enc );
  }
#endif
%}

enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
  // CALL directly to the runtime
  // The user of this is responsible for ensuring that R_L7 is empty (killed).
  emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
                  /*preserve_g2=*/true);
%}

// Save SP in L7 around method-handle calls (restored by restore_SP).
enc_class preserve_SP %{
  MacroAssembler _masm(&cbuf);
  __ mov(SP, L7_mh_SP_save);
%}

enc_class restore_SP %{
  MacroAssembler _masm(&cbuf);
  __ mov(L7_mh_SP_save, SP);
%}

enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
  // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
  // who we intended to call.
  if (!_method) {
    // A runtime call (no resolved Java method).
    emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
  } else if (_optimized_virtual) {
    emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
  } else {
    emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
  }
  if (_method) {  // Emit stub for static call.
    address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
    // Stub does not fit into scratch buffer if TraceJumps is enabled
    if (stub == NULL && !(TraceJumps && Compile::current()->in_scratch_emit_size())) {
      // Bail out the compile: no room for the to-interpreter stub.
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  }
%}

// Dynamic (virtual) call: inline-cache call when the vtable index is the
// invalid sentinel, otherwise an explicit vtable dispatch through G3/G5.
enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();
  int vtable_index = this->_vtable_index;
  // MachCallDynamicJavaNode::ret_addr_offset uses this same test
  if (vtable_index < 0) {
    // must be invalid_vtable_index, not nonvirtual_vtable_index
    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
    Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
    assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
    assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
    __ ic_call((address)$meth$$method);
  } else {
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    // Just go thru the vtable
    // get receiver klass (receiver already checked for non-null)
    // If we end up going thru a c2i adapter interpreter expects method in G5
    int off = __ offset();
    __ load_klass(O0, G3_scratch);
    int klass_load_size;
    if (UseCompressedClassPointers) {
      assert(Universe::heap() != NULL, "java heap should be initialized");
      klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    } else {
      klass_load_size = 1*BytesPerInstWord;
    }
    int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
    int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
    if (Assembler::is_simm13(v_off)) {
      __ ld_ptr(G3, v_off, G5_method);
    } else {
      // Generate 2 instructions
      __ Assembler::sethi(v_off & ~0x3ff, G5_method);
      __ or3(G5_method, v_off & 0x3ff, G5_method);
      // ld_ptr, set_hi, set
      assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
             "Unexpected instruction size(s)");
      __ ld_ptr(G3, G5_method, G5_method);
    }
    // NOTE: for vtable dispatches, the vtable entry will never be null.
    // However it may very well end up in handle_wrong_method if the
    // method is abstract for the particular class.
    __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
    // jump to target (either compiled code or c2iadapter)
    __ jmpl(G3_scratch, G0, O7);
    __ delayed()->nop();
  }
%}

enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
  MacroAssembler _masm(&cbuf);

  Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg = G3;   // caller must kill G3!  We cannot reuse G5_ic_reg here because
                            // we might be calling a C2I adapter which needs it.

  assert(temp_reg != G5_ic_reg, "conflicting registers");
  // Load nmethod
  __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg);

  // CALL to compiled java, indirect the contents of G3
  __ set_inst_mark();
  __ callr(temp_reg, G0);
  __ delayed()->nop();
%}

// 32-bit signed divide via 64-bit sdivx: sign-extend both operands first.
// Note: mutates the input registers (hence the iRegIsafe operands).
enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{
  MacroAssembler _masm(&cbuf);
  Register Rdividend = reg_to_register_object($src1$$reg);
  Register Rdivisor = reg_to_register_object($src2$$reg);
  Register Rresult = reg_to_register_object($dst$$reg);

  __ sra(Rdivisor, 0, Rdivisor);
  __ sra(Rdividend, 0, Rdividend);
  __ sdivx(Rdividend, Rdivisor, Rresult);
%}

// 32-bit signed divide by a simm13 constant.
enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{
  MacroAssembler _masm(&cbuf);

  Register Rdividend = reg_to_register_object($src1$$reg);
  int divisor = $imm$$constant;
  Register Rresult = reg_to_register_object($dst$$reg);

  __ sra(Rdividend, 0, Rdividend);
  __ sdivx(Rdividend, divisor, Rresult);
%}

// High 32 bits of the 64-bit signed product of two 32-bit values.
enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{
  MacroAssembler _masm(&cbuf);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  Register Rsrc2 = reg_to_register_object($src2$$reg);
  Register Rdst = reg_to_register_object($dst$$reg);

  __ sra( Rsrc1, 0, Rsrc1 );
  __ sra( Rsrc2, 0, Rsrc2 );
  __ mulx( Rsrc1, Rsrc2, Rdst );
  __ srlx( Rdst, 32, Rdst );
%}

// 32-bit signed remainder: dst = src1 - (src1 / src2) * src2, with the
// quotient computed in the O7 scratch register.
enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{
  MacroAssembler _masm(&cbuf);
  Register Rdividend = reg_to_register_object($src1$$reg);
  Register Rdivisor = reg_to_register_object($src2$$reg);
  Register Rresult = reg_to_register_object($dst$$reg);
  Register Rscratch = reg_to_register_object($scratch$$reg);

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");

  __ sra(Rdividend, 0, Rdividend);
  __ sra(Rdivisor, 0, Rdivisor);
  __ sdivx(Rdividend, Rdivisor, Rscratch);
  __ mulx(Rscratch, Rdivisor, Rscratch);
  __ sub(Rdividend, Rscratch, Rresult);
%}

// 32-bit signed remainder by a simm13 constant.
enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{
  MacroAssembler _masm(&cbuf);

  Register Rdividend = reg_to_register_object($src1$$reg);
  int divisor = $imm$$constant;
  Register Rresult = reg_to_register_object($dst$$reg);
  Register Rscratch = reg_to_register_object($scratch$$reg);

  assert(Rdividend != Rscratch, "");

  __ sra(Rdividend, 0, Rdividend);
  __ sdivx(Rdividend, divisor, Rscratch);
  __ mulx(Rscratch, divisor, Rscratch);
  __ sub(Rdividend, Rscratch, Rresult);
%}

// Single-precision absolute value.
enc_class fabss (sflt_reg dst, sflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

  __ fabs(FloatRegisterImpl::S, Fsrc, Fdst);
%}

// Double-precision absolute value.
enc_class fabsd (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

  __ fabs(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// Double-precision negation.
enc_class fnegd (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

  __ fneg(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// Single-precision square root.
enc_class fsqrts (sflt_reg dst, sflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

  __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst);
%}

// Double-precision square root.
enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

  __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// Single-precision register move.
enc_class fmovs (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);

  __ fmov(FloatRegisterImpl::S, Fsrc, Fdst);
%}

// Double-precision register move.
enc_class fmovd (dflt_reg dst, dflt_reg src) %{
  MacroAssembler _masm(&cbuf);

  FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
  FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);

  __ fmov(FloatRegisterImpl::D, Fsrc, Fdst);
%}

// Fast-path object lock: delegates to MacroAssembler::compiler_lock_object.
enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
  MacroAssembler _masm(&cbuf);

  Register Roop  = reg_to_register_object($oop$$reg);
  Register Rbox  = reg_to_register_object($box$$reg);
  Register Rscratch = reg_to_register_object($scratch$$reg);
  Register Rmark =    reg_to_register_object($scratch2$$reg);

  assert(Roop  != Rscratch, "");
  assert(Roop  != Rmark, "");
  assert(Rbox  != Rscratch, "");
  assert(Rbox  != Rmark, "");

  __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
%}

// Fast-path object unlock: delegates to MacroAssembler::compiler_unlock_object.
enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
  MacroAssembler _masm(&cbuf);

  Register Roop  = reg_to_register_object($oop$$reg);
  Register Rbox  = reg_to_register_object($box$$reg);
  Register Rscratch = reg_to_register_object($scratch$$reg);
  Register Rmark =    reg_to_register_object($scratch2$$reg);

  assert(Roop  != Rscratch, "");
  assert(Roop  != Rmark, "");
  assert(Rbox  != Rscratch, "");
  assert(Rbox  != Rmark, "");

  __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
%}

// Pointer compare-and-swap; leaves the comparison result in the flags.
enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
  MacroAssembler _masm(&cbuf);
  Register Rmem = reg_to_register_object($mem$$reg);
  Register Rold = reg_to_register_object($old$$reg);
  Register Rnew = reg_to_register_object($new$$reg);

  __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
  __ cmp( Rold, Rnew );
%}

// 64-bit CAS via the O7 scratch register so Rnew survives the swap.
enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{
  Register Rmem = reg_to_register_object($mem$$reg);
  Register Rold = reg_to_register_object($old$$reg);
  Register Rnew = reg_to_register_object($new$$reg);

  MacroAssembler _masm(&cbuf);
  __ mov(Rnew, O7);
  __ casx(Rmem, Rold, O7);
  __ cmp( Rold, O7 );
%}

// raw int cas, used for compareAndSwap
enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{
  Register Rmem = reg_to_register_object($mem$$reg);
  Register Rold = reg_to_register_object($old$$reg);
  Register Rnew = reg_to_register_object($new$$reg);

  MacroAssembler _masm(&cbuf);
  __ mov(Rnew, O7);
  __ cas(Rmem, Rold, O7);
  __ cmp( Rold, O7 );
%}

// res = (xcc equal) ? 1 : 0 — start with 1, clear on not-equal via MOVcc.
enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
  Register Rres = reg_to_register_object($res$$reg);

  MacroAssembler _masm(&cbuf);
  __ mov(1, Rres);
  __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres );
%}

// res = (icc equal) ? 1 : 0.
enc_class enc_iflags_ne_to_boolean( iRegI res ) %{
  Register Rres = reg_to_register_object($res$$reg);

  MacroAssembler _masm(&cbuf);
  __ mov(1, Rres);
  __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
%}

// FP compare to -1/0/1; $primary selects single (true) vs double operands.
enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{
  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg)
                                 : reg_to_DoubleFloatRegister_object($src1$$reg);
  FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg)
                                 : reg_to_DoubleFloatRegister_object($src2$$reg);

  // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1)
  __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
%}


// String.compareTo intrinsic: compare jchar arrays, returning the first
// character difference or, on a common prefix, the length difference.
enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
  Label Ldone, Lloop;
  MacroAssembler _masm(&cbuf);

  Register   str1_reg = reg_to_register_object($str1$$reg);
  Register   str2_reg = reg_to_register_object($str2$$reg);
  Register   cnt1_reg = reg_to_register_object($cnt1$$reg);
  Register   cnt2_reg = reg_to_register_object($cnt2$$reg);
  Register result_reg = reg_to_register_object($result$$reg);

  assert(result_reg != str1_reg &&
         result_reg != str2_reg &&
         result_reg != cnt1_reg &&
         result_reg != cnt2_reg ,
         "need different registers");

  // Compute the minimum of the string lengths(str1_reg) and the
  // difference of the string lengths (stack)

  // See if the lengths are different, and calculate min in str1_reg.
  // Stash diff in O7 in case we need it for a tie-breaker.
  Label Lskip;
  __ subcc(cnt1_reg, cnt2_reg, O7);
  __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
  __ br(Assembler::greater, true, Assembler::pt, Lskip);
  // cnt2 is shorter, so use its count:
  __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
  __ bind(Lskip);

  // reallocate cnt1_reg, cnt2_reg, result_reg
  // Note:  limit_reg holds the string length pre-scaled by 2
  Register limit_reg =   cnt1_reg;
  Register  chr2_reg =   cnt2_reg;
  Register  chr1_reg = result_reg;
  // str{12} are the base pointers

  // Is the minimum length zero?
  __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity
  __ br(Assembler::equal, true, Assembler::pn, Ldone);
  __ delayed()->mov(O7, result_reg);  // result is difference in lengths

  // Load first characters
  __ lduh(str1_reg, 0, chr1_reg);
  __ lduh(str2_reg, 0, chr2_reg);

  // Compare first characters
  __ subcc(chr1_reg, chr2_reg, chr1_reg);
  __ br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1_reg == result_reg, "result must be pre-placed");
  __ delayed()->nop();

  {
    // Check after comparing first character to see if strings are equivalent
    Label LSkip2;
    // Check if the strings start at same location
    __ cmp(str1_reg, str2_reg);
    __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2);
    __ delayed()->nop();

    // Check if the length difference is zero (in O7)
    __ cmp(G0, O7);
    __ br(Assembler::equal, true, Assembler::pn, Ldone);
    __ delayed()->mov(G0, result_reg);  // result is zero

    // Strings might not be equal
    __ bind(LSkip2);
  }

  // We have no guarantee that on 64 bit the higher half of limit_reg is 0
  __ signx(limit_reg);

  __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg);
  __ br(Assembler::equal, true, Assembler::pn, Ldone);
  __ delayed()->mov(O7, result_reg);  // result is difference in lengths

  // Shift str1_reg and str2_reg to the end of the arrays, negate limit
  __ add(str1_reg, limit_reg, str1_reg);
  __ add(str2_reg, limit_reg, str2_reg);
  __ neg(chr1_reg, limit_reg);  // limit = -(limit-2)

  // Compare the rest of the characters
  __ lduh(str1_reg, limit_reg, chr1_reg);
  __ bind(Lloop);
  // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted
  __ lduh(str2_reg, limit_reg, chr2_reg);
  __ subcc(chr1_reg, chr2_reg, chr1_reg);
  __ br(Assembler::notZero, false, Assembler::pt, Ldone);
  assert(chr1_reg == result_reg, "result must be pre-placed");
  __ delayed()->inccc(limit_reg, sizeof(jchar));
  // annul LDUH if branch is not taken to prevent access past end of string
  __ br(Assembler::notZero, true, Assembler::pt, Lloop);
  __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted

  // If strings are equal up to min length, return the length difference.
  __ mov(O7, result_reg);

  // Otherwise, return the difference between the first mismatched chars.
  __ bind(Ldone);
%}

// String.equals intrinsic: result = 1 if the two jchar sequences of length
// cnt match, else 0.  Word-compares when 4-byte aligned, else char-by-char.
enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{
  Label Lchar, Lchar_loop, Ldone;
  MacroAssembler _masm(&cbuf);

  Register   str1_reg = reg_to_register_object($str1$$reg);
  Register   str2_reg = reg_to_register_object($str2$$reg);
  Register    cnt_reg = reg_to_register_object($cnt$$reg);
  Register   tmp1_reg = O7;
  Register result_reg = reg_to_register_object($result$$reg);

  assert(result_reg != str1_reg &&
         result_reg != str2_reg &&
         result_reg !=  cnt_reg &&
         result_reg != tmp1_reg ,
         "need different registers");

  __ cmp(str1_reg, str2_reg); //same char[] ?
  __ brx(Assembler::equal, true, Assembler::pn, Ldone);
  __ delayed()->add(G0, 1, result_reg);

  __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn);
  __ delayed()->add(G0, 1, result_reg); // count == 0

  //rename registers
  Register limit_reg =    cnt_reg;
  Register  chr1_reg = result_reg;
  Register  chr2_reg =   tmp1_reg;

  // We have no guarantee that on 64 bit the higher half of limit_reg is 0
  __ signx(limit_reg);

  //check for alignment and position the pointers to the ends
  __ or3(str1_reg, str2_reg, chr1_reg);
  __ andcc(chr1_reg, 0x3, chr1_reg);
  // notZero means at least one not 4-byte aligned.
  // We could optimize the case when both arrays are not aligned
  // but it is not frequent case and it requires additional checks.
  __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare
  __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count

  // Compare char[] arrays aligned to 4 bytes.
  __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
                        chr1_reg, chr2_reg, Ldone);
  __ ba(Ldone);
  __ delayed()->add(G0, 1, result_reg);

  // char by char compare
  __ bind(Lchar);
  __ add(str1_reg, limit_reg, str1_reg);
  __ add(str2_reg, limit_reg, str2_reg);
  __ neg(limit_reg); //negate count

  __ lduh(str1_reg, limit_reg, chr1_reg);
  // Lchar_loop
  __ bind(Lchar_loop);
  __ lduh(str2_reg, limit_reg, chr2_reg);
  __ cmp(chr1_reg, chr2_reg);
  __ br(Assembler::notEqual, true, Assembler::pt, Ldone);
  __ delayed()->mov(G0, result_reg); //not equal
  __ inccc(limit_reg, sizeof(jchar));
  // annul LDUH if branch is not taken to prevent access past end of string
  __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop);
  __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted

  __ add(G0, 1, result_reg); //equal

  __ bind(Ldone);
%}

// Arrays.equals intrinsic for char[]: handles identity, null operands and
// length mismatch, then word-compares the array bodies.
enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{
  Label Lvector, Ldone, Lloop;
  MacroAssembler _masm(&cbuf);

  Register   ary1_reg = reg_to_register_object($ary1$$reg);
  Register   ary2_reg = reg_to_register_object($ary2$$reg);
  Register   tmp1_reg = reg_to_register_object($tmp1$$reg);
  Register   tmp2_reg = O7;
  Register result_reg = reg_to_register_object($result$$reg);

  int length_offset  = arrayOopDesc::length_offset_in_bytes();
  int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);

  // return true if the same array
  __ cmp(ary1_reg, ary2_reg);
  __ brx(Assembler::equal, true, Assembler::pn, Ldone);
  __ delayed()->add(G0, 1, result_reg); // equal

  __ br_null(ary1_reg, true, Assembler::pn, Ldone);
  __ delayed()->mov(G0, result_reg);    // not equal

  __ br_null(ary2_reg, true, Assembler::pn, Ldone);
  __ delayed()->mov(G0, result_reg);    // not equal

  //load the lengths of arrays
  __ ld(Address(ary1_reg, length_offset), tmp1_reg);
  __ ld(Address(ary2_reg, length_offset), tmp2_reg);

  // return false if the two arrays are not equal length
  __ cmp(tmp1_reg, tmp2_reg);
  __ br(Assembler::notEqual, true, Assembler::pn, Ldone);
  __ delayed()->mov(G0, result_reg);     // not equal

  __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn);
  __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal

  // load array addresses
  __ add(ary1_reg, base_offset, ary1_reg);
  __ add(ary2_reg, base_offset, ary2_reg);

  // renaming registers
  Register chr1_reg  = result_reg; // for characters in ary1
  Register chr2_reg  =   tmp2_reg; // for characters in ary2
  Register limit_reg =   tmp1_reg; // length

  // set byte count
  __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg);

  // Compare char[] arrays aligned to 4 bytes.
  __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
                        chr1_reg, chr2_reg, Ldone);
  __ add(G0, 1, result_reg); // equals

  __ bind(Ldone);
%}

// Jump to the shared rethrow stub.  Under ASSERT, first records the PC of
// the jump in the global last_rethrow for debugging.
enc_class enc_rethrow() %{
  cbuf.set_insts_mark();
  Register temp_reg = G3;
  AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
  assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  __ save_frame(0);
  AddressLiteral last_rethrow_addrlit(&last_rethrow);
  __ sethi(last_rethrow_addrlit, L1);
  Address addr(L1, last_rethrow_addrlit.low10());
  __ rdpc(L2);
  __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
  __ st_ptr(L2, addr);
  __ restore();
#endif
  __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
  __ delayed()->nop();
%}

enc_class emit_mem_nop() %{
  // Generates the instruction LDUXA [o6,g0],#0x82,g0
cbuf.insts()->emit_int32((unsigned int) 0xc0839040); 3172 %} 3173 3174 enc_class emit_fadd_nop() %{ 3175 // Generates the instruction FMOVS f31,f31 3176 cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f); 3177 %} 3178 3179 enc_class emit_br_nop() %{ 3180 // Generates the instruction BPN,PN . 3181 cbuf.insts()->emit_int32((unsigned int) 0x00400000); 3182 %} 3183 3184 enc_class enc_membar_acquire %{ 3185 MacroAssembler _masm(&cbuf); 3186 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) ); 3187 %} 3188 3189 enc_class enc_membar_release %{ 3190 MacroAssembler _masm(&cbuf); 3191 __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) ); 3192 %} 3193 3194 enc_class enc_membar_volatile %{ 3195 MacroAssembler _masm(&cbuf); 3196 __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) ); 3197 %} 3198 3199 %} 3200 3201 //----------FRAME-------------------------------------------------------------- 3202 // Definition of frame structure and management information. 3203 // 3204 // S T A C K L A Y O U T Allocators stack-slot number 3205 // | (to get allocators register number 3206 // G Owned by | | v add VMRegImpl::stack0) 3207 // r CALLER | | 3208 // o | +--------+ pad to even-align allocators stack-slot 3209 // w V | pad0 | numbers; owned by CALLER 3210 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned 3211 // h ^ | in | 5 3212 // | | args | 4 Holes in incoming args owned by SELF 3213 // | | | | 3 3214 // | | +--------+ 3215 // V | | old out| Empty on Intel, window on Sparc 3216 // | old |preserve| Must be even aligned. 3217 // | SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned 3218 // | | in | 3 area for Intel ret address 3219 // Owned by |preserve| Empty on Sparc. 
3220 // SELF +--------+ 3221 // | | pad2 | 2 pad to align old SP 3222 // | +--------+ 1 3223 // | | locks | 0 3224 // | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned 3225 // | | pad1 | 11 pad to align new SP 3226 // | +--------+ 3227 // | | | 10 3228 // | | spills | 9 spills 3229 // V | | 8 (pad0 slot for callee) 3230 // -----------+--------+----> Matcher::_out_arg_limit, unaligned 3231 // ^ | out | 7 3232 // | | args | 6 Holes in outgoing args owned by CALLEE 3233 // Owned by +--------+ 3234 // CALLEE | new out| 6 Empty on Intel, window on Sparc 3235 // | new |preserve| Must be even-aligned. 3236 // | SP-+--------+----> Matcher::_new_SP, even aligned 3237 // | | | 3238 // 3239 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is 3240 // known from SELF's arguments and the Java calling convention. 3241 // Region 6-7 is determined per call site. 3242 // Note 2: If the calling convention leaves holes in the incoming argument 3243 // area, those holes are owned by SELF. Holes in the outgoing area 3244 // are owned by the CALLEE. Holes should not be nessecary in the 3245 // incoming area, as the Java calling convention is completely under 3246 // the control of the AD file. Doubles can be sorted and packed to 3247 // avoid holes. Holes in the outgoing arguments may be necessary for 3248 // varargs C calling conventions. 3249 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is 3250 // even aligned with pad0 as needed. 3251 // Region 6 is even aligned. Region 6-7 is NOT even aligned; 3252 // region 6-11 is even aligned; it may be padded out more so that 3253 // the region from SP to FP meets the minimum stack alignment. 3254 3255 frame %{ 3256 // What direction does stack grow in (assumed to be same for native & Java) 3257 stack_direction(TOWARDS_LOW); 3258 3259 // These two registers define part of the calling convention 3260 // between compiled code and the interpreter. 
3261 inline_cache_reg(R_G5); // Inline Cache Register or Method* for I2C 3262 interpreter_method_oop_reg(R_G5); // Method Oop Register when calling interpreter 3263 3264 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset] 3265 cisc_spilling_operand_name(indOffset); 3266 3267 // Number of stack slots consumed by a Monitor enter 3268 #ifdef _LP64 3269 sync_stack_slots(2); 3270 #else 3271 sync_stack_slots(1); 3272 #endif 3273 3274 // Compiled code's Frame Pointer 3275 frame_pointer(R_SP); 3276 3277 // Stack alignment requirement 3278 stack_alignment(StackAlignmentInBytes); 3279 // LP64: Alignment size in bytes (128-bit -> 16 bytes) 3280 // !LP64: Alignment size in bytes (64-bit -> 8 bytes) 3281 3282 // Number of stack slots between incoming argument block and the start of 3283 // a new frame. The PROLOG must add this many slots to the stack. The 3284 // EPILOG must remove this many slots. 3285 in_preserve_stack_slots(0); 3286 3287 // Number of outgoing stack slots killed above the out_preserve_stack_slots 3288 // for calls to C. Supports the var-args backing area for register parms. 3289 // ADLC doesn't support parsing expressions, so I folded the math by hand. 3290 #ifdef _LP64 3291 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word 3292 varargs_C_out_slots_killed(12); 3293 #else 3294 // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word 3295 varargs_C_out_slots_killed( 7); 3296 #endif 3297 3298 // The after-PROLOG location of the return address. Location of 3299 // return address specifies a type (REG or STACK) and a number 3300 // representing the register number (i.e. - use a register name) or 3301 // stack slot. 
3302 return_addr(REG R_I7); // Ret Addr is in register I7 3303 3304 // Body of function which returns an OptoRegs array locating 3305 // arguments either in registers or in stack slots for calling 3306 // java 3307 calling_convention %{ 3308 (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing); 3309 3310 %} 3311 3312 // Body of function which returns an OptoRegs array locating 3313 // arguments either in registers or in stack slots for calling 3314 // C. 3315 c_calling_convention %{ 3316 // This is obviously always outgoing 3317 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length); 3318 %} 3319 3320 // Location of native (C/C++) and interpreter return values. This is specified to 3321 // be the same as Java. In the 32-bit VM, long values are actually returned from 3322 // native calls in O0:O1 and returned to the interpreter in I0:I1. The copying 3323 // to and from the register pairs is done by the appropriate call and epilog 3324 // opcodes. This simplifies the register allocator. 
3325 c_return_value %{ 3326 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); 3327 #ifdef _LP64 3328 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; 3329 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; 3330 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; 3331 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; 3332 #else // !_LP64 3333 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; 3334 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; 3335 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; 3336 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num }; 3337 #endif 3338 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], 3339 (is_outgoing?lo_out:lo_in)[ideal_reg] ); 3340 %} 3341 3342 // Location of compiled Java return values. 
Same as C 3343 return_value %{ 3344 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" ); 3345 #ifdef _LP64 3346 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_O0_num }; 3347 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num, OptoReg::Bad, R_F1_num, R_O0H_num}; 3348 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_I0_num }; 3349 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num, OptoReg::Bad, R_F1_num, R_I0H_num}; 3350 #else // !_LP64 3351 static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num, R_O0_num, R_O0_num, R_F0_num, R_F0_num, R_G1_num }; 3352 static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; 3353 static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num, R_I0_num, R_I0_num, R_F0_num, R_F0_num, R_G1_num }; 3354 static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num}; 3355 #endif 3356 return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg], 3357 (is_outgoing?lo_out:lo_in)[ideal_reg] ); 3358 %} 3359 3360 %} 3361 3362 3363 //----------ATTRIBUTES--------------------------------------------------------- 3364 //----------Operand Attributes------------------------------------------------- 3365 op_attrib op_cost(1); // Required cost attribute 3366 3367 //----------Instruction Attributes--------------------------------------------- 3368 ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute 3369 ins_attrib ins_size(32); // Required size attribute (in bits) 3370 3371 // avoid_back_to_back attribute is an expression that must return 3372 // one of the following values defined in MachNode: 3373 // AVOID_NONE - 
instruction can be placed anywhere 3374 // AVOID_BEFORE - instruction cannot be placed after an 3375 // instruction with MachNode::AVOID_AFTER 3376 // AVOID_AFTER - the next instruction cannot be the one 3377 // with MachNode::AVOID_BEFORE 3378 // AVOID_BEFORE_AND_AFTER - BEFORE and AFTER attributes at 3379 // the same time 3380 ins_attrib ins_avoid_back_to_back(MachNode::AVOID_NONE); 3381 3382 ins_attrib ins_short_branch(0); // Required flag: is this instruction a 3383 // non-matching short branch variant of some 3384 // long branch? 3385 3386 //----------OPERANDS----------------------------------------------------------- 3387 // Operand definitions must precede instruction definitions for correct parsing 3388 // in the ADLC because operands constitute user defined types which are used in 3389 // instruction definitions. 3390 3391 //----------Simple Operands---------------------------------------------------- 3392 // Immediate Operands 3393 // Integer Immediate: 32-bit 3394 operand immI() %{ 3395 match(ConI); 3396 3397 op_cost(0); 3398 // formats are generated automatically for constants and base registers 3399 format %{ %} 3400 interface(CONST_INTER); 3401 %} 3402 3403 // Integer Immediate: 0-bit 3404 operand immI0() %{ 3405 predicate(n->get_int() == 0); 3406 match(ConI); 3407 op_cost(0); 3408 3409 format %{ %} 3410 interface(CONST_INTER); 3411 %} 3412 3413 // Integer Immediate: 5-bit 3414 operand immI5() %{ 3415 predicate(Assembler::is_simm5(n->get_int())); 3416 match(ConI); 3417 op_cost(0); 3418 format %{ %} 3419 interface(CONST_INTER); 3420 %} 3421 3422 // Integer Immediate: 8-bit 3423 operand immI8() %{ 3424 predicate(Assembler::is_simm8(n->get_int())); 3425 match(ConI); 3426 op_cost(0); 3427 format %{ %} 3428 interface(CONST_INTER); 3429 %} 3430 3431 // Integer Immediate: the value 10 3432 operand immI10() %{ 3433 predicate(n->get_int() == 10); 3434 match(ConI); 3435 op_cost(0); 3436 3437 format %{ %} 3438 interface(CONST_INTER); 3439 %} 3440 3441 // Integer 
Immediate: 11-bit 3442 operand immI11() %{ 3443 predicate(Assembler::is_simm11(n->get_int())); 3444 match(ConI); 3445 op_cost(0); 3446 format %{ %} 3447 interface(CONST_INTER); 3448 %} 3449 3450 // Integer Immediate: 13-bit 3451 operand immI13() %{ 3452 predicate(Assembler::is_simm13(n->get_int())); 3453 match(ConI); 3454 op_cost(0); 3455 3456 format %{ %} 3457 interface(CONST_INTER); 3458 %} 3459 3460 // Integer Immediate: 13-bit minus 7 3461 operand immI13m7() %{ 3462 predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095)); 3463 match(ConI); 3464 op_cost(0); 3465 3466 format %{ %} 3467 interface(CONST_INTER); 3468 %} 3469 3470 // Integer Immediate: 16-bit 3471 operand immI16() %{ 3472 predicate(Assembler::is_simm16(n->get_int())); 3473 match(ConI); 3474 op_cost(0); 3475 format %{ %} 3476 interface(CONST_INTER); 3477 %} 3478 3479 // Integer Immediate: the values 1-31 3480 operand immI_1_31() %{ 3481 predicate(n->get_int() >= 1 && n->get_int() <= 31); 3482 match(ConI); 3483 op_cost(0); 3484 3485 format %{ %} 3486 interface(CONST_INTER); 3487 %} 3488 3489 // Integer Immediate: the values 32-63 3490 operand immI_32_63() %{ 3491 predicate(n->get_int() >= 32 && n->get_int() <= 63); 3492 match(ConI); 3493 op_cost(0); 3494 3495 format %{ %} 3496 interface(CONST_INTER); 3497 %} 3498 3499 // Immediates for special shifts (sign extend) 3500 3501 // Integer Immediate: the value 16 3502 operand immI_16() %{ 3503 predicate(n->get_int() == 16); 3504 match(ConI); 3505 op_cost(0); 3506 3507 format %{ %} 3508 interface(CONST_INTER); 3509 %} 3510 3511 // Integer Immediate: the value 24 3512 operand immI_24() %{ 3513 predicate(n->get_int() == 24); 3514 match(ConI); 3515 op_cost(0); 3516 3517 format %{ %} 3518 interface(CONST_INTER); 3519 %} 3520 // Integer Immediate: the value 255 3521 operand immI_255() %{ 3522 predicate( n->get_int() == 255 ); 3523 match(ConI); 3524 op_cost(0); 3525 3526 format %{ %} 3527 interface(CONST_INTER); 3528 %} 3529 3530 // Integer Immediate: 
the value 65535 3531 operand immI_65535() %{ 3532 predicate(n->get_int() == 65535); 3533 match(ConI); 3534 op_cost(0); 3535 3536 format %{ %} 3537 interface(CONST_INTER); 3538 %} 3539 3540 // Integer Immediate: the values 0-31 3541 operand immU5() %{ 3542 predicate(n->get_int() >= 0 && n->get_int() <= 31); 3543 match(ConI); 3544 op_cost(0); 3545 3546 format %{ %} 3547 interface(CONST_INTER); 3548 %} 3549 3550 // Integer Immediate: 6-bit 3551 operand immU6() %{ 3552 predicate(n->get_int() >= 0 && n->get_int() <= 63); 3553 match(ConI); 3554 op_cost(0); 3555 format %{ %} 3556 interface(CONST_INTER); 3557 %} 3558 3559 // Unsigned Integer Immediate: 12-bit (non-negative that fits in simm13) 3560 operand immU12() %{ 3561 predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int())); 3562 match(ConI); 3563 op_cost(0); 3564 3565 format %{ %} 3566 interface(CONST_INTER); 3567 %} 3568 3569 // Integer Immediate non-negative 3570 operand immU31() 3571 %{ 3572 predicate(n->get_int() >= 0); 3573 match(ConI); 3574 3575 op_cost(0); 3576 format %{ %} 3577 interface(CONST_INTER); 3578 %} 3579 3580 // Long Immediate: the value FF 3581 operand immL_FF() %{ 3582 predicate( n->get_long() == 0xFFL ); 3583 match(ConL); 3584 op_cost(0); 3585 3586 format %{ %} 3587 interface(CONST_INTER); 3588 %} 3589 3590 // Long Immediate: the value FFFF 3591 operand immL_FFFF() %{ 3592 predicate( n->get_long() == 0xFFFFL ); 3593 match(ConL); 3594 op_cost(0); 3595 3596 format %{ %} 3597 interface(CONST_INTER); 3598 %} 3599 3600 // Pointer Immediate: 32 or 64-bit 3601 operand immP() %{ 3602 match(ConP); 3603 3604 op_cost(5); 3605 // formats are generated automatically for constants and base registers 3606 format %{ %} 3607 interface(CONST_INTER); 3608 %} 3609 3610 #ifdef _LP64 3611 // Pointer Immediate: 64-bit 3612 operand immP_set() %{ 3613 predicate(!VM_Version::is_niagara_plus()); 3614 match(ConP); 3615 3616 op_cost(5); 3617 // formats are generated automatically for constants and base registers 
3618 format %{ %} 3619 interface(CONST_INTER); 3620 %} 3621 3622 // Pointer Immediate: 64-bit 3623 // From Niagara2 processors on a load should be better than materializing. 3624 operand immP_load() %{ 3625 predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3))); 3626 match(ConP); 3627 3628 op_cost(5); 3629 // formats are generated automatically for constants and base registers 3630 format %{ %} 3631 interface(CONST_INTER); 3632 %} 3633 3634 // Pointer Immediate: 64-bit 3635 operand immP_no_oop_cheap() %{ 3636 predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3)); 3637 match(ConP); 3638 3639 op_cost(5); 3640 // formats are generated automatically for constants and base registers 3641 format %{ %} 3642 interface(CONST_INTER); 3643 %} 3644 #endif 3645 3646 operand immP13() %{ 3647 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095)); 3648 match(ConP); 3649 op_cost(0); 3650 3651 format %{ %} 3652 interface(CONST_INTER); 3653 %} 3654 3655 operand immP0() %{ 3656 predicate(n->get_ptr() == 0); 3657 match(ConP); 3658 op_cost(0); 3659 3660 format %{ %} 3661 interface(CONST_INTER); 3662 %} 3663 3664 operand immP_poll() %{ 3665 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page()); 3666 match(ConP); 3667 3668 // formats are generated automatically for constants and base registers 3669 format %{ %} 3670 interface(CONST_INTER); 3671 %} 3672 3673 // Pointer Immediate 3674 operand immN() 3675 %{ 3676 match(ConN); 3677 3678 op_cost(10); 3679 format %{ %} 3680 interface(CONST_INTER); 3681 %} 3682 3683 operand immNKlass() 3684 %{ 3685 match(ConNKlass); 3686 3687 op_cost(10); 3688 format %{ %} 3689 interface(CONST_INTER); 3690 %} 3691 3692 // NULL Pointer Immediate 3693 operand immN0() 3694 %{ 3695 predicate(n->get_narrowcon() == 0); 3696 match(ConN); 3697 3698 op_cost(0); 3699 format %{ %} 3700 
interface(CONST_INTER); 3701 %} 3702 3703 operand immL() %{ 3704 match(ConL); 3705 op_cost(40); 3706 // formats are generated automatically for constants and base registers 3707 format %{ %} 3708 interface(CONST_INTER); 3709 %} 3710 3711 operand immL0() %{ 3712 predicate(n->get_long() == 0L); 3713 match(ConL); 3714 op_cost(0); 3715 // formats are generated automatically for constants and base registers 3716 format %{ %} 3717 interface(CONST_INTER); 3718 %} 3719 3720 // Integer Immediate: 5-bit 3721 operand immL5() %{ 3722 predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long())); 3723 match(ConL); 3724 op_cost(0); 3725 format %{ %} 3726 interface(CONST_INTER); 3727 %} 3728 3729 // Long Immediate: 13-bit 3730 operand immL13() %{ 3731 predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L)); 3732 match(ConL); 3733 op_cost(0); 3734 3735 format %{ %} 3736 interface(CONST_INTER); 3737 %} 3738 3739 // Long Immediate: 13-bit minus 7 3740 operand immL13m7() %{ 3741 predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L)); 3742 match(ConL); 3743 op_cost(0); 3744 3745 format %{ %} 3746 interface(CONST_INTER); 3747 %} 3748 3749 // Long Immediate: low 32-bit mask 3750 operand immL_32bits() %{ 3751 predicate(n->get_long() == 0xFFFFFFFFL); 3752 match(ConL); 3753 op_cost(0); 3754 3755 format %{ %} 3756 interface(CONST_INTER); 3757 %} 3758 3759 // Long Immediate: cheap (materialize in <= 3 instructions) 3760 operand immL_cheap() %{ 3761 predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3); 3762 match(ConL); 3763 op_cost(0); 3764 3765 format %{ %} 3766 interface(CONST_INTER); 3767 %} 3768 3769 // Long Immediate: expensive (materialize in > 3 instructions) 3770 operand immL_expensive() %{ 3771 predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3); 3772 match(ConL); 3773 op_cost(0); 3774 3775 format %{ %} 3776 interface(CONST_INTER); 3777 %} 3778 
3779 // Double Immediate 3780 operand immD() %{ 3781 match(ConD); 3782 3783 op_cost(40); 3784 format %{ %} 3785 interface(CONST_INTER); 3786 %} 3787 3788 // Double Immediate: +0.0d 3789 operand immD0() %{ 3790 predicate(jlong_cast(n->getd()) == 0); 3791 match(ConD); 3792 3793 op_cost(0); 3794 format %{ %} 3795 interface(CONST_INTER); 3796 %} 3797 3798 // Float Immediate 3799 operand immF() %{ 3800 match(ConF); 3801 3802 op_cost(20); 3803 format %{ %} 3804 interface(CONST_INTER); 3805 %} 3806 3807 // Float Immediate: +0.0f 3808 operand immF0() %{ 3809 predicate(jint_cast(n->getf()) == 0); 3810 match(ConF); 3811 3812 op_cost(0); 3813 format %{ %} 3814 interface(CONST_INTER); 3815 %} 3816 3817 // Integer Register Operands 3818 // Integer Register 3819 operand iRegI() %{ 3820 constraint(ALLOC_IN_RC(int_reg)); 3821 match(RegI); 3822 3823 match(notemp_iRegI); 3824 match(g1RegI); 3825 match(o0RegI); 3826 match(iRegIsafe); 3827 3828 format %{ %} 3829 interface(REG_INTER); 3830 %} 3831 3832 operand notemp_iRegI() %{ 3833 constraint(ALLOC_IN_RC(notemp_int_reg)); 3834 match(RegI); 3835 3836 match(o0RegI); 3837 3838 format %{ %} 3839 interface(REG_INTER); 3840 %} 3841 3842 operand o0RegI() %{ 3843 constraint(ALLOC_IN_RC(o0_regI)); 3844 match(iRegI); 3845 3846 format %{ %} 3847 interface(REG_INTER); 3848 %} 3849 3850 // Pointer Register 3851 operand iRegP() %{ 3852 constraint(ALLOC_IN_RC(ptr_reg)); 3853 match(RegP); 3854 3855 match(lock_ptr_RegP); 3856 match(g1RegP); 3857 match(g2RegP); 3858 match(g3RegP); 3859 match(g4RegP); 3860 match(i0RegP); 3861 match(o0RegP); 3862 match(o1RegP); 3863 match(l7RegP); 3864 3865 format %{ %} 3866 interface(REG_INTER); 3867 %} 3868 3869 operand sp_ptr_RegP() %{ 3870 constraint(ALLOC_IN_RC(sp_ptr_reg)); 3871 match(RegP); 3872 match(iRegP); 3873 3874 format %{ %} 3875 interface(REG_INTER); 3876 %} 3877 3878 operand lock_ptr_RegP() %{ 3879 constraint(ALLOC_IN_RC(lock_ptr_reg)); 3880 match(RegP); 3881 match(i0RegP); 3882 match(o0RegP); 3883 
match(o1RegP); 3884 match(l7RegP); 3885 3886 format %{ %} 3887 interface(REG_INTER); 3888 %} 3889 3890 operand g1RegP() %{ 3891 constraint(ALLOC_IN_RC(g1_regP)); 3892 match(iRegP); 3893 3894 format %{ %} 3895 interface(REG_INTER); 3896 %} 3897 3898 operand g2RegP() %{ 3899 constraint(ALLOC_IN_RC(g2_regP)); 3900 match(iRegP); 3901 3902 format %{ %} 3903 interface(REG_INTER); 3904 %} 3905 3906 operand g3RegP() %{ 3907 constraint(ALLOC_IN_RC(g3_regP)); 3908 match(iRegP); 3909 3910 format %{ %} 3911 interface(REG_INTER); 3912 %} 3913 3914 operand g1RegI() %{ 3915 constraint(ALLOC_IN_RC(g1_regI)); 3916 match(iRegI); 3917 3918 format %{ %} 3919 interface(REG_INTER); 3920 %} 3921 3922 operand g3RegI() %{ 3923 constraint(ALLOC_IN_RC(g3_regI)); 3924 match(iRegI); 3925 3926 format %{ %} 3927 interface(REG_INTER); 3928 %} 3929 3930 operand g4RegI() %{ 3931 constraint(ALLOC_IN_RC(g4_regI)); 3932 match(iRegI); 3933 3934 format %{ %} 3935 interface(REG_INTER); 3936 %} 3937 3938 operand g4RegP() %{ 3939 constraint(ALLOC_IN_RC(g4_regP)); 3940 match(iRegP); 3941 3942 format %{ %} 3943 interface(REG_INTER); 3944 %} 3945 3946 operand i0RegP() %{ 3947 constraint(ALLOC_IN_RC(i0_regP)); 3948 match(iRegP); 3949 3950 format %{ %} 3951 interface(REG_INTER); 3952 %} 3953 3954 operand o0RegP() %{ 3955 constraint(ALLOC_IN_RC(o0_regP)); 3956 match(iRegP); 3957 3958 format %{ %} 3959 interface(REG_INTER); 3960 %} 3961 3962 operand o1RegP() %{ 3963 constraint(ALLOC_IN_RC(o1_regP)); 3964 match(iRegP); 3965 3966 format %{ %} 3967 interface(REG_INTER); 3968 %} 3969 3970 operand o2RegP() %{ 3971 constraint(ALLOC_IN_RC(o2_regP)); 3972 match(iRegP); 3973 3974 format %{ %} 3975 interface(REG_INTER); 3976 %} 3977 3978 operand o7RegP() %{ 3979 constraint(ALLOC_IN_RC(o7_regP)); 3980 match(iRegP); 3981 3982 format %{ %} 3983 interface(REG_INTER); 3984 %} 3985 3986 operand l7RegP() %{ 3987 constraint(ALLOC_IN_RC(l7_regP)); 3988 match(iRegP); 3989 3990 format %{ %} 3991 interface(REG_INTER); 3992 %} 3993 
3994 operand o7RegI() %{ 3995 constraint(ALLOC_IN_RC(o7_regI)); 3996 match(iRegI); 3997 3998 format %{ %} 3999 interface(REG_INTER); 4000 %} 4001 4002 operand iRegN() %{ 4003 constraint(ALLOC_IN_RC(int_reg)); 4004 match(RegN); 4005 4006 format %{ %} 4007 interface(REG_INTER); 4008 %} 4009 4010 // Long Register 4011 operand iRegL() %{ 4012 constraint(ALLOC_IN_RC(long_reg)); 4013 match(RegL); 4014 4015 format %{ %} 4016 interface(REG_INTER); 4017 %} 4018 4019 operand o2RegL() %{ 4020 constraint(ALLOC_IN_RC(o2_regL)); 4021 match(iRegL); 4022 4023 format %{ %} 4024 interface(REG_INTER); 4025 %} 4026 4027 operand o7RegL() %{ 4028 constraint(ALLOC_IN_RC(o7_regL)); 4029 match(iRegL); 4030 4031 format %{ %} 4032 interface(REG_INTER); 4033 %} 4034 4035 operand g1RegL() %{ 4036 constraint(ALLOC_IN_RC(g1_regL)); 4037 match(iRegL); 4038 4039 format %{ %} 4040 interface(REG_INTER); 4041 %} 4042 4043 operand g3RegL() %{ 4044 constraint(ALLOC_IN_RC(g3_regL)); 4045 match(iRegL); 4046 4047 format %{ %} 4048 interface(REG_INTER); 4049 %} 4050 4051 // Int Register safe 4052 // This is 64bit safe 4053 operand iRegIsafe() %{ 4054 constraint(ALLOC_IN_RC(long_reg)); 4055 4056 match(iRegI); 4057 4058 format %{ %} 4059 interface(REG_INTER); 4060 %} 4061 4062 // Condition Code Flag Register 4063 operand flagsReg() %{ 4064 constraint(ALLOC_IN_RC(int_flags)); 4065 match(RegFlags); 4066 4067 format %{ "ccr" %} // both ICC and XCC 4068 interface(REG_INTER); 4069 %} 4070 4071 // Condition Code Register, unsigned comparisons. 4072 operand flagsRegU() %{ 4073 constraint(ALLOC_IN_RC(int_flags)); 4074 match(RegFlags); 4075 4076 format %{ "icc_U" %} 4077 interface(REG_INTER); 4078 %} 4079 4080 // Condition Code Register, pointer comparisons. 
4081 operand flagsRegP() %{ 4082 constraint(ALLOC_IN_RC(int_flags)); 4083 match(RegFlags); 4084 4085 #ifdef _LP64 4086 format %{ "xcc_P" %} 4087 #else 4088 format %{ "icc_P" %} 4089 #endif 4090 interface(REG_INTER); 4091 %} 4092 4093 // Condition Code Register, long comparisons. 4094 operand flagsRegL() %{ 4095 constraint(ALLOC_IN_RC(int_flags)); 4096 match(RegFlags); 4097 4098 format %{ "xcc_L" %} 4099 interface(REG_INTER); 4100 %} 4101 4102 // Condition Code Register, floating comparisons, unordered same as "less". 4103 operand flagsRegF() %{ 4104 constraint(ALLOC_IN_RC(float_flags)); 4105 match(RegFlags); 4106 match(flagsRegF0); 4107 4108 format %{ %} 4109 interface(REG_INTER); 4110 %} 4111 4112 operand flagsRegF0() %{ 4113 constraint(ALLOC_IN_RC(float_flag0)); 4114 match(RegFlags); 4115 4116 format %{ %} 4117 interface(REG_INTER); 4118 %} 4119 4120 4121 // Condition Code Flag Register used by long compare 4122 operand flagsReg_long_LTGE() %{ 4123 constraint(ALLOC_IN_RC(int_flags)); 4124 match(RegFlags); 4125 format %{ "icc_LTGE" %} 4126 interface(REG_INTER); 4127 %} 4128 operand flagsReg_long_EQNE() %{ 4129 constraint(ALLOC_IN_RC(int_flags)); 4130 match(RegFlags); 4131 format %{ "icc_EQNE" %} 4132 interface(REG_INTER); 4133 %} 4134 operand flagsReg_long_LEGT() %{ 4135 constraint(ALLOC_IN_RC(int_flags)); 4136 match(RegFlags); 4137 format %{ "icc_LEGT" %} 4138 interface(REG_INTER); 4139 %} 4140 4141 4142 operand regD() %{ 4143 constraint(ALLOC_IN_RC(dflt_reg)); 4144 match(RegD); 4145 4146 match(regD_low); 4147 4148 format %{ %} 4149 interface(REG_INTER); 4150 %} 4151 4152 operand regF() %{ 4153 constraint(ALLOC_IN_RC(sflt_reg)); 4154 match(RegF); 4155 4156 format %{ %} 4157 interface(REG_INTER); 4158 %} 4159 4160 operand regD_low() %{ 4161 constraint(ALLOC_IN_RC(dflt_low_reg)); 4162 match(regD); 4163 4164 format %{ %} 4165 interface(REG_INTER); 4166 %} 4167 4168 // Special Registers 4169 4170 // Method Register 4171 operand inline_cache_regP(iRegP reg) %{ 4172 
constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1 4173 match(reg); 4174 format %{ %} 4175 interface(REG_INTER); 4176 %} 4177 4178 operand interpreter_method_oop_regP(iRegP reg) %{ 4179 constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1 4180 match(reg); 4181 format %{ %} 4182 interface(REG_INTER); 4183 %} 4184 4185 4186 //----------Complex Operands--------------------------------------------------- 4187 // Indirect Memory Reference 4188 operand indirect(sp_ptr_RegP reg) %{ 4189 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4190 match(reg); 4191 4192 op_cost(100); 4193 format %{ "[$reg]" %} 4194 interface(MEMORY_INTER) %{ 4195 base($reg); 4196 index(0x0); 4197 scale(0x0); 4198 disp(0x0); 4199 %} 4200 %} 4201 4202 // Indirect with simm13 Offset 4203 operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{ 4204 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4205 match(AddP reg offset); 4206 4207 op_cost(100); 4208 format %{ "[$reg + $offset]" %} 4209 interface(MEMORY_INTER) %{ 4210 base($reg); 4211 index(0x0); 4212 scale(0x0); 4213 disp($offset); 4214 %} 4215 %} 4216 4217 // Indirect with simm13 Offset minus 7 4218 operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{ 4219 constraint(ALLOC_IN_RC(sp_ptr_reg)); 4220 match(AddP reg offset); 4221 4222 op_cost(100); 4223 format %{ "[$reg + $offset]" %} 4224 interface(MEMORY_INTER) %{ 4225 base($reg); 4226 index(0x0); 4227 scale(0x0); 4228 disp($offset); 4229 %} 4230 %} 4231 4232 // Note: Intel has a swapped version also, like this: 4233 //operand indOffsetX(iRegI reg, immP offset) %{ 4234 // constraint(ALLOC_IN_RC(int_reg)); 4235 // match(AddP offset reg); 4236 // 4237 // op_cost(100); 4238 // format %{ "[$reg + $offset]" %} 4239 // interface(MEMORY_INTER) %{ 4240 // base($reg); 4241 // index(0x0); 4242 // scale(0x0); 4243 // disp($offset); 4244 // %} 4245 //%} 4246 //// However, it doesn't make sense for SPARC, since 4247 // we have no particularly good way 
// to embed oops in single instructions.

// Indirect with Register Index
// Matches pointer + register-index addressing (AddP addr index); no scale,
// no displacement — SPARC only supports [reg + reg] or [reg + simm13].
operand indIndex(iRegP addr, iRegX index) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr index);

  op_cost(100);
  format %{ "[$addr + $index]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}

//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All five stackSlot* operands below share the same shape: base is encoding
// 0xE (R_SP) and the slot's frame offset goes in the displacement.
operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
//       the AD file. It is generically handled within the ADLC.

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.
// Signed integer comparison. The hex values are the SPARC Bicc/BPcc `cond`
// field encodings (e.g. 0x1 = be, 0x9 = bne) — assumed from the SPARC V9
// manual; confirm against the assembler's branch emitters.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x3);
    greater_equal(0xB);
    less_equal(0x2);
    greater(0xA);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, unsigned
// The predicate excludes overflow tests, so the overflow/no_overflow
// entries below are present only to satisfy the COND_INTER interface and
// should be unreachable — NOTE(review): confirm ADLC requires them.
operand cmpOpU() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "u" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, pointer (same as unsigned)
// Identical encodings to cmpOpU; kept separate so pointer compares can be
// matched distinctly in instruction rules.
operand cmpOpP() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "p" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0x5);
    greater_equal(0xD);
    less_equal(0x4);
    greater(0xC);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

// Comparison Op, branch-register encoding
// Values are the 3-bit rcond field of the register-contents branch (BPr);
// overflow tests are not expressible there, hence "not supported".
operand cmpOp_reg() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal        (0x1);
    not_equal    (0x5);
    less         (0x3);
    greater_equal(0x7);
    less_equal   (0x2);
    greater      (0x6);
    overflow(0x7);     // not supported
    no_overflow(0xF);  // not supported
  %}
%}

// Comparison Code, floating, unordered same as less
// Encodings are for the floating-point condition-code branch (FBfcc);
// the "unordered" outcome is folded into the less-than direction.
operand cmpOpF() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "fl" %}
  interface(COND_INTER) %{
    equal(0x9);
    not_equal(0x1);
    less(0x3);
    greater_equal(0xB);
    less_equal(0xE);
    greater(0x6);

    overflow(0x7);     // not supported
    no_overflow(0xF);  // not supported
  %}
%}

// Used by long compare
// Encodings are the signed cmpOp set with less/greater directions swapped,
// for use when the comparison's operands have been commuted.
operand cmpOp_commute() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
            n->as_Bool()->_test._test != BoolTest::no_overflow);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x1);
    not_equal(0x9);
    less(0xA);
    greater_equal(0x2);
    less_equal(0xB);
    greater(0x3);
    overflow(0x7);
    no_overflow(0xF);
  %}
%}

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format. The classic
// case of this is memory operands.
4468 opclass memory( indirect, indOffset13, indIndex ); 4469 opclass indIndexMemory( indIndex ); 4470 4471 //----------PIPELINE----------------------------------------------------------- 4472 pipeline %{ 4473 4474 //----------ATTRIBUTES--------------------------------------------------------- 4475 attributes %{ 4476 fixed_size_instructions; // Fixed size instructions 4477 branch_has_delay_slot; // Branch has delay slot following 4478 max_instructions_per_bundle = 4; // Up to 4 instructions per bundle 4479 instruction_unit_size = 4; // An instruction is 4 bytes long 4480 instruction_fetch_unit_size = 16; // The processor fetches one line 4481 instruction_fetch_units = 1; // of 16 bytes 4482 4483 // List of nop instructions 4484 nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR ); 4485 %} 4486 4487 //----------RESOURCES---------------------------------------------------------- 4488 // Resources are the functional units available to the machine 4489 resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1); 4490 4491 //----------PIPELINE DESCRIPTION----------------------------------------------- 4492 // Pipeline Description specifies the stages in the machine's pipeline 4493 4494 pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D); 4495 4496 //----------PIPELINE CLASSES--------------------------------------------------- 4497 // Pipeline Classes describe the stages in which input and output are 4498 // referenced by the hardware pipeline. 
4499 4500 // Integer ALU reg-reg operation 4501 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 4502 single_instruction; 4503 dst : E(write); 4504 src1 : R(read); 4505 src2 : R(read); 4506 IALU : R; 4507 %} 4508 4509 // Integer ALU reg-reg long operation 4510 pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{ 4511 instruction_count(2); 4512 dst : E(write); 4513 src1 : R(read); 4514 src2 : R(read); 4515 IALU : R; 4516 IALU : R; 4517 %} 4518 4519 // Integer ALU reg-reg long dependent operation 4520 pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{ 4521 instruction_count(1); multiple_bundles; 4522 dst : E(write); 4523 src1 : R(read); 4524 src2 : R(read); 4525 cr : E(write); 4526 IALU : R(2); 4527 %} 4528 4529 // Integer ALU reg-imm operaion 4530 pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{ 4531 single_instruction; 4532 dst : E(write); 4533 src1 : R(read); 4534 IALU : R; 4535 %} 4536 4537 // Integer ALU reg-reg operation with condition code 4538 pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{ 4539 single_instruction; 4540 dst : E(write); 4541 cr : E(write); 4542 src1 : R(read); 4543 src2 : R(read); 4544 IALU : R; 4545 %} 4546 4547 // Integer ALU reg-imm operation with condition code 4548 pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{ 4549 single_instruction; 4550 dst : E(write); 4551 cr : E(write); 4552 src1 : R(read); 4553 IALU : R; 4554 %} 4555 4556 // Integer ALU zero-reg operation 4557 pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{ 4558 single_instruction; 4559 dst : E(write); 4560 src2 : R(read); 4561 IALU : R; 4562 %} 4563 4564 // Integer ALU zero-reg operation with condition code only 4565 pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{ 4566 single_instruction; 4567 cr : E(write); 4568 src : R(read); 4569 IALU : R; 4570 %} 4571 4572 // Integer ALU reg-reg operation with condition code only 4573 
pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
  single_instruction;
  cr    : E(write);
  src1  : R(read);
  src2  : R(read);
  IALU  : R;
%}

// Integer ALU reg-imm operation with condition code only
pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
  single_instruction;
  cr    : E(write);
  src1  : R(read);
  IALU  : R;
%}

// Integer ALU reg-reg-zero operation with condition code only
pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
  single_instruction;
  cr    : E(write);
  src1  : R(read);
  src2  : R(read);
  IALU  : R;
%}

// Integer ALU reg-imm-zero operation with condition code only
pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{
  single_instruction;
  cr    : E(write);
  src1  : R(read);
  IALU  : R;
%}

// Integer ALU reg-reg operation with condition code, src1 modified
// src1 appears as both write (E) and read (R): it is read then overwritten.
pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
  single_instruction;
  cr    : E(write);
  src1  : E(write);
  src1  : R(read);
  src2  : R(read);
  IALU  : R;
%}

// Integer ALU reg-imm operation with condition code, src1 modified
pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
  single_instruction;
  cr    : E(write);
  src1  : E(write);
  src1  : R(read);
  IALU  : R;
%}

// Long compare: multi-bundle sequence using both IALU and BR resources;
// result available 4 cycles after E.
pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
  multiple_bundles;
  dst   : E(write)+4;
  cr    : E(write);
  src1  : R(read);
  src2  : R(read);
  IALU  : R(3);
  BR    : R(2);
%}

// Integer ALU operation
pipe_class ialu_none(iRegI dst) %{
  single_instruction;
  dst   : E(write);
  IALU  : R;
%}

// Integer ALU reg operation
pipe_class ialu_reg(iRegI dst, iRegI src) %{
  single_instruction; may_have_no_code;
  dst   : E(write);
  src   : R(read);
  IALU  : R;
%}

// Integer ALU reg conditional operation
// This instruction has a 1 cycle stall, and cannot execute
// in the same cycle as the instruction setting the condition
// code. We kludge this by pretending to read the condition code
// 1 cycle earlier, and by marking the functional units as busy
// for 2 cycles with the result available 1 cycle later than
// is really the case.
pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
  single_instruction;
  op2_out : C(write);
  op1     : R(read);
  cr      : R(read);  // This is really E, with a 1 cycle stall
  BR      : R(2);
  MS      : R(2);
%}

#ifdef _LP64
pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
  instruction_count(1); multiple_bundles;
  dst   : C(write)+1;
  src   : R(read)+1;
  IALU  : R(1);
  BR    : E(2);
  MS    : E(2);
%}
#endif

// Integer ALU reg operation
pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
  single_instruction; may_have_no_code;
  dst   : E(write);
  src   : R(read);
  IALU  : R;
%}

pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
  single_instruction; may_have_no_code;
  dst   : E(write);
  src   : R(read);
  IALU  : R;
%}

// Two integer ALU reg operations
pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
  instruction_count(2);
  dst   : E(write);
  src   : R(read);
  A0    : R;
  A1    : R;
%}

// Two integer ALU reg operations
pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
  instruction_count(2); may_have_no_code;
  dst   : E(write);
  src   : R(read);
  A0    : R;
  A1    : R;
%}

// Integer ALU imm operation
pipe_class ialu_imm(iRegI dst, immI13 src) %{
  single_instruction;
  dst   : E(write);
  IALU  : R;
%}

// Integer ALU reg-reg
// with carry operation
pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
  single_instruction;
  dst   : E(write);
  src1  : R(read);
  src2  : R(read);
  IALU  : R;
%}

// Integer ALU cc operation
pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
  single_instruction;
  dst   : E(write);
  cc    : R(read);
  IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
  instruction_count(1); multiple_bundles;
  dst   : E(write)+1;
  src   : R(read);
  IALU  : R;
%}

// Integer ALU cc / second IALU operation
pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
  instruction_count(1); multiple_bundles;
  dst   : E(write)+1;
  p     : R(read);
  q     : R(read);
  IALU  : R;
%}

// Integer ALU hi-lo-reg operation (sethi/or constant materialization)
pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
  instruction_count(1); multiple_bundles;
  dst   : E(write)+1;
  IALU  : R(2);
%}

// Float ALU hi-lo-reg operation (with temp)
pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{
  instruction_count(1); multiple_bundles;
  dst   : E(write)+1;
  IALU  : R(2);
%}

// Long Constant
pipe_class loadConL( iRegL dst, immL src ) %{
  instruction_count(2); multiple_bundles;
  dst   : E(write)+1;
  IALU  : R(2);
  IALU  : R(2);
%}

// Pointer Constant
pipe_class loadConP( iRegP dst, immP src ) %{
  instruction_count(0); multiple_bundles;
  fixed_latency(6);
%}

// Polling Address
pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
#ifdef _LP64
  instruction_count(0); multiple_bundles;
  fixed_latency(6);
#else
  dst   : E(write);
  IALU  : R;
#endif
%}

// Long Constant small
pipe_class loadConLlo( iRegL dst, immL src ) %{
  instruction_count(2);
  dst   : E(write);
  IALU  : R;
  IALU  : R;
%}

// [PHH] This is wrong for 64-bit. See LdImmF/D.
pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
  instruction_count(1); multiple_bundles;
  src   : R(read);
  dst   : M(write)+1;
  IALU  : R;
  MS    : E;
%}

// Integer ALU nop operation
pipe_class ialu_nop() %{
  single_instruction;
  IALU  : R;
%}

// Integer ALU nop operation
pipe_class ialu_nop_A0() %{
  single_instruction;
  A0    : R;
%}

// Integer ALU nop operation
pipe_class ialu_nop_A1() %{
  single_instruction;
  A1    : R;
%}

// Integer Multiply reg-reg operation
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
  single_instruction;
  dst   : E(write);
  src1  : R(read);
  src2  : R(read);
  MS    : R(5);
%}

// Integer Multiply reg-imm operation
pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
  single_instruction;
  dst   : E(write);
  src1  : R(read);
  MS    : R(5);
%}

pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  single_instruction;
  dst   : E(write)+4;
  src1  : R(read);
  src2  : R(read);
  MS    : R(6);
%}

pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
  single_instruction;
  dst   : E(write)+4;
  src1  : R(read);
  MS    : R(6);
%}

// Integer Divide reg-reg
pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst   : E(write);
  temp  : E(write);
  src1  : R(read);
  src2  : R(read);
  temp  : R(read);
  MS    : R(38);
%}

// Integer Divide reg-imm
pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst   : E(write);
  temp  : E(write);
  src1  : R(read);
  temp  : R(read);
  MS    : R(38);
%}

// Long Divide
pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
  dst   : E(write)+71;
  src1  : R(read);
  src2  : R(read)+1;
  MS    : R(70);
%}

pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
  dst   : E(write)+71;
  src1  : R(read);
  MS    : R(70);
%}

// Floating Point Add Float
pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
  single_instruction;
  dst   : X(write);
  src1  : E(read);
  src2  : E(read);
  FA    : R;
%}

// Floating Point Add Double
pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
  single_instruction;
  dst   : X(write);
  src1  : E(read);
  src2  : E(read);
  FA    : R;
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  cr    : R(read);
  FA    : R(2);
  BR    : R(2);
%}

// Floating Point Conditional Move based on integer flags
pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  cr    : R(read);
  FA    : R(2);
  BR    : R(2);
%}

// Floating Point Multiply Float
pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
  single_instruction;
  dst   : X(write);
  src1  : E(read);
  src2  : E(read);
  FM    : R;
%}

// Floating Point Multiply Double
pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
  single_instruction;
  dst   : X(write);
  src1  : E(read);
  src2  : E(read);
  FM    : R;
%}

// Floating Point Divide Float
pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
  single_instruction;
  dst   : X(write);
  src1  : E(read);
  src2  : E(read);
  FM    : R;
  FDIV  : C(14);
%}
// Floating Point Divide Double
pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
  single_instruction;
  dst   : X(write);
  src1  : E(read);
  src2  : E(read);
  FM    : R;
  FDIV  : C(17);
%}

// Floating Point Move/Negate/Abs Float
pipe_class faddF_reg(regF dst, regF src) %{
  single_instruction;
  dst   : W(write);
  src   : E(read);
  FA    : R(1);
%}

// Floating Point Move/Negate/Abs Double
pipe_class faddD_reg(regD dst, regD src) %{
  single_instruction;
  dst   : W(write);
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert F->D
pipe_class fcvtF2D(regD dst, regF src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert I->D
pipe_class fcvtI2D(regD dst, regF src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert LHi->D
pipe_class fcvtLHi2D(regD dst, regD src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert L->D
pipe_class fcvtL2D(regD dst, regF src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert L->F
pipe_class fcvtL2F(regD dst, regF src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert D->F
pipe_class fcvtD2F(regD dst, regF src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert I->L
pipe_class fcvtI2L(regD dst, regF src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert D->I
// (header comment previously said "D->F" — the class converts double to int)
pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst   : X(write)+6;
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert D->L
pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst   : X(write)+6;
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert F->I
pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst   : X(write)+6;
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert F->L
pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
  instruction_count(1); multiple_bundles;
  dst   : X(write)+6;
  src   : E(read);
  FA    : R;
%}

// Floating Point Convert I->F
pipe_class fcvtI2F(regF dst, regF src) %{
  single_instruction;
  dst   : X(write);
  src   : E(read);
  FA    : R;
%}

// Floating Point Compare
pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
  single_instruction;
  cr    : X(write);
  src1  : E(read);
  src2  : E(read);
  FA    : R;
%}

// Floating Point Compare
pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
  single_instruction;
  cr    : X(write);
  src1  : E(read);
  src2  : E(read);
  FA    : R;
%}

// Floating Add Nop
pipe_class fadd_nop() %{
  single_instruction;
  FA    : R;
%}

// Integer Store to Memory
pipe_class istore_mem_reg(memory mem, iRegI src) %{
  single_instruction;
  mem   : R(read);
  src   : C(read);
  MS    : R;
%}

// Integer Store to Memory
pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{
  single_instruction;
  mem   : R(read);
  src   : C(read);
  MS    : R;
%}

// Integer Store Zero to Memory
pipe_class istore_mem_zero(memory mem, immI0 src) %{
  single_instruction;
  mem   : R(read);
  MS    : R;
%}

// Special Stack Slot
// Store
pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{
  single_instruction;
  stkSlot : R(read);
  src     : C(read);
  MS      : R;
%}

// Special Stack Slot Store
pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{
  instruction_count(2); multiple_bundles;
  stkSlot : R(read);
  src     : C(read);
  MS      : R(2);
%}

// Float Store
// NOTE(review): parameter type is spelled "RegF" (capitalized) here and in
// the other fstore* classes, unlike "regF" elsewhere — confirm ADLC treats
// these as the same operand class.
pipe_class fstoreF_mem_reg(memory mem, RegF src) %{
  single_instruction;
  mem   : R(read);
  src   : C(read);
  MS    : R;
%}

// Float Store
pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{
  single_instruction;
  mem   : R(read);
  MS    : R;
%}

// Double Store
pipe_class fstoreD_mem_reg(memory mem, RegD src) %{
  instruction_count(1);
  mem   : R(read);
  src   : C(read);
  MS    : R;
%}

// Double Store
pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{
  single_instruction;
  mem   : R(read);
  MS    : R;
%}

// Special Stack Slot Float Store
pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{
  single_instruction;
  stkSlot : R(read);
  src     : C(read);
  MS      : R;
%}

// Special Stack Slot Double Store
pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{
  single_instruction;
  stkSlot : R(read);
  src     : C(read);
  MS      : R;
%}

// Integer Load (when sign bit propagation not needed)
pipe_class iload_mem(iRegI dst, memory mem) %{
  single_instruction;
  mem   : R(read);
  dst   : C(write);
  MS    : R;
%}

// Integer Load from stack operand
pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{
  single_instruction;
  mem   : R(read);
  dst   : C(write);
  MS    : R;
%}

// Integer Load (when sign bit propagation or masking is needed)
pipe_class iload_mask_mem(iRegI dst, memory mem) %{
  single_instruction;
  mem   : R(read);
  dst   : M(write);
  MS    : R;
%}

// Float Load
pipe_class floadF_mem(regF dst, memory mem) %{
  single_instruction;
  mem   : R(read);
  dst   : M(write);
  MS    : R;
%}

// Float Load
pipe_class floadD_mem(regD dst, memory mem) %{
  instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
  mem   : R(read);
  dst   : M(write);
  MS    : R;
%}

// Float Load
pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{
  single_instruction;
  stkSlot : R(read);
  dst     : M(write);
  MS      : R;
%}

// Float Load
pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{
  single_instruction;
  stkSlot : R(read);
  dst     : M(write);
  MS      : R;
%}

// Memory Nop
pipe_class mem_nop() %{
  single_instruction;
  MS    : R;
%}

pipe_class sethi(iRegP dst, immI src) %{
  single_instruction;
  dst   : E(write);
  IALU  : R;
%}

pipe_class loadPollP(iRegP poll) %{
  single_instruction;
  poll  : R(read);
  MS    : R;
%}

pipe_class br(Universe br, label labl) %{
  single_instruction_with_delay_slot;
  BR    : R;
%}

pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
  single_instruction_with_delay_slot;
  cr    : E(read);
  BR    : R;
%}

pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
  single_instruction_with_delay_slot;
  op1   : E(read);
  BR    : R;
  MS    : R;
%}

// Compare and branch
pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
  instruction_count(2); has_delay_slot;
  cr    : E(write);
  src1  : R(read);
  src2  : R(read);
  IALU  : R;
  BR    : R;
%}

// Compare and branch
pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
  instruction_count(2); has_delay_slot;
  cr    : E(write);
  src1  : R(read);
  IALU  : R;
  BR    : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
  single_instruction;
  src1  : E(read);
  src2  : E(read);
  IALU  : R;
  BR    : R;
%}

// Compare and branch using cbcond
pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
  single_instruction;
  src1  : E(read);
  IALU  : R;
  BR    : R;
%}

pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
  single_instruction_with_delay_slot;
  cr    : E(read);
  BR    : R;
%}

pipe_class br_nop() %{
  single_instruction;
  BR    : R;
%}

pipe_class simple_call(method meth) %{
  instruction_count(2); multiple_bundles; force_serialization;
  fixed_latency(100);
  BR    : R(1);
  MS    : R(1);
  A0    : R(1);
%}

pipe_class compiled_call(method meth) %{
  instruction_count(1); multiple_bundles; force_serialization;
  fixed_latency(100);
  MS    : R(1);
%}

pipe_class call(method meth) %{
  instruction_count(0); multiple_bundles; force_serialization;
  fixed_latency(100);
%}

pipe_class tail_call(Universe ignore, label labl) %{
  single_instruction; has_delay_slot;
  fixed_latency(100);
  BR    : R(1);
  MS    : R(1);
%}

pipe_class ret(Universe ignore) %{
  single_instruction; has_delay_slot;
  BR    : R(1);
  MS    : R(1);
%}

pipe_class ret_poll(g3RegP poll) %{
  instruction_count(3); has_delay_slot;
  poll  : E(read);
  MS    : R;
%}

// The real do-nothing guy
pipe_class empty( ) %{
  instruction_count(0);
%}

pipe_class long_memory_op() %{
  instruction_count(0); multiple_bundles; force_serialization;
  fixed_latency(25);
  MS    : R(1);
%}

// Check-cast
pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
  array : R(read);
  match : R(read);
  IALU  : R(2);
  BR    : R(2);
  MS    : R;
%}

// Convert FPU flags into +1,0,-1
pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
  src1  : E(read);
  src2  : E(read);
  dst   : E(write);
  FA    : R;
  MS    : R(2);
  BR    : R(2);
%}

// Compare for p < q, and conditionally add y
// NOTE(review): no ';' after R(3) — ADLC appears to accept a missing
// terminator on the last resource entry, but confirm before relying on it.
pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
  p     : E(read);
  q     : E(read);
  y     : E(read);
  IALU  : R(3)
%}

// Perform a compare, then move conditionally in a branch delay slot.
pipe_class min_max( iRegI src2, iRegI srcdst ) %{
  src2   : E(read);
  srcdst : E(read);
  IALU   : R;
  BR     : R;
%}

// Define the class for the Nop node
define %{
  MachNop = ialu_nop;
%}

%}

//----------INSTRUCTIONS-------------------------------------------------------

//------------Special Stack Slot instructions - no match rules-----------------
instruct stkI_to_regF(regF dst, stackSlotI src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDF $src,$dst\t! stkI to regF" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadF_stk);
%}

instruct stkL_to_regD(regD dst, stackSlotL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF $src,$dst\t! stkL to regD" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg(src, dst));
  ins_pipe(floadD_stk);
%}

instruct regF_to_stkI(stackSlotI dst, regF src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STF $src,$dst\t! regF to stkI" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

instruct regD_to_stkL(stackSlotL dst, regD src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF $src,$dst\t! regD to stkL" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg(dst, src));
  ins_pipe(fstoreD_stk_reg);
%}

// Store an int into the high word of a long stack slot and zero the low word
// (two 32-bit stores; the second stores %g0 at slot+4).
instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST*2);
  size(8);
  format %{ "STW $src,$dst.hi\t! long\n\t"
            "STW R_G0,$dst.lo" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
  ins_pipe(lstoreI_stk_reg);
%}

instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
  // No match rule to avoid chain rule match.
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$dst\t! regL to stkD" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_stk_reg);
%}

//---------- Chain stack slots between similar types --------

// Load integer from stack slot
instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDUW $src,$dst\t!stk" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store integer to stack slot
instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "STW $src,$dst\t!stk" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

// Load long from stack slot
instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX $src,$dst\t! long" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store long to stack slot
instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
  match(Set dst src);

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$dst\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}

#ifdef _LP64
// Load pointer from stack slot, 64-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX $src,$dst\t!ptr" %}
  opcode(Assembler::ldx_op3);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$dst\t!ptr" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#else // _LP64
// Load pointer from stack slot, 32-bit encoding
instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDUW $src,$dst\t!ptr" %}
  opcode(Assembler::lduw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( src, dst ) );
  ins_pipe(iload_mem);
%}

// Store pointer to stack slot
instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);
  format %{ "STW $src,$dst\t!ptr" %}
  opcode(Assembler::stw_op3, Assembler::ldst_op);
  ins_encode(simple_form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_reg);
%}
#endif // _LP64

//------------Special Nop instructions for bundling - no match rules-----------
// Nop using the A0 functional unit
instruct Nop_A0() %{
  ins_cost(0);

  format %{ "NOP ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A0);
%}

// Nop using the A1 functional unit
instruct Nop_A1( ) %{
  ins_cost(0);

  format %{ "NOP ! Alu Pipeline" %}
  opcode(Assembler::or_op3, Assembler::arith_op);
  ins_encode( form2_nop() );
  ins_pipe(ialu_nop_A1);
%}

// Nop using the memory functional unit
instruct Nop_MS( ) %{
  ins_cost(0);

  format %{ "NOP ! Memory Pipeline" %}
  ins_encode( emit_mem_nop );
  ins_pipe(mem_nop);
%}

// Nop using the floating add functional unit
instruct Nop_FA( ) %{
  ins_cost(0);

  format %{ "NOP ! Floating Add Pipeline" %}
  ins_encode( emit_fadd_nop );
  ins_pipe(fadd_nop);
%}

// Nop using the branch functional unit
instruct Nop_BR( ) %{
  ins_cost(0);

  format %{ "NOP ! Branch Pipeline" %}
  ins_encode( emit_br_nop );
  ins_pipe(br_nop);
%}

//----------Load/Store/Move Instructions---------------------------------------
//----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
instruct loadB(iRegI dst, memory mem) %{
  match(Set dst (LoadB mem));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB $mem,$dst\t! byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Byte (8bit signed) into a Long Register
instruct loadB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  format %{ "LDSB $mem,$dst\t!
byte -> long" %} 5651 ins_encode %{ 5652 __ ldsb($mem$$Address, $dst$$Register); 5653 %} 5654 ins_pipe(iload_mask_mem); 5655 %} 5656 5657 // Load Unsigned Byte (8bit UNsigned) into an int reg 5658 instruct loadUB(iRegI dst, memory mem) %{ 5659 match(Set dst (LoadUB mem)); 5660 ins_cost(MEMORY_REF_COST); 5661 5662 size(4); 5663 format %{ "LDUB $mem,$dst\t! ubyte" %} 5664 ins_encode %{ 5665 __ ldub($mem$$Address, $dst$$Register); 5666 %} 5667 ins_pipe(iload_mem); 5668 %} 5669 5670 // Load Unsigned Byte (8bit UNsigned) into a Long Register 5671 instruct loadUB2L(iRegL dst, memory mem) %{ 5672 match(Set dst (ConvI2L (LoadUB mem))); 5673 ins_cost(MEMORY_REF_COST); 5674 5675 size(4); 5676 format %{ "LDUB $mem,$dst\t! ubyte -> long" %} 5677 ins_encode %{ 5678 __ ldub($mem$$Address, $dst$$Register); 5679 %} 5680 ins_pipe(iload_mem); 5681 %} 5682 5683 // Load Unsigned Byte (8 bit UNsigned) with 32-bit mask into Long Register 5684 instruct loadUB2L_immI(iRegL dst, memory mem, immI mask) %{ 5685 match(Set dst (ConvI2L (AndI (LoadUB mem) mask))); 5686 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5687 5688 size(2*4); 5689 format %{ "LDUB $mem,$dst\t# ubyte & 32-bit mask -> long\n\t" 5690 "AND $dst,right_n_bits($mask, 8),$dst" %} 5691 ins_encode %{ 5692 __ ldub($mem$$Address, $dst$$Register); 5693 __ and3($dst$$Register, $mask$$constant & right_n_bits(8), $dst$$Register); 5694 %} 5695 ins_pipe(iload_mem); 5696 %} 5697 5698 // Load Short (16bit signed) 5699 instruct loadS(iRegI dst, memory mem) %{ 5700 match(Set dst (LoadS mem)); 5701 ins_cost(MEMORY_REF_COST); 5702 5703 size(4); 5704 format %{ "LDSH $mem,$dst\t! 
short" %} 5705 ins_encode %{ 5706 __ ldsh($mem$$Address, $dst$$Register); 5707 %} 5708 ins_pipe(iload_mask_mem); 5709 %} 5710 5711 // Load Short (16 bit signed) to Byte (8 bit signed) 5712 instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5713 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour)); 5714 ins_cost(MEMORY_REF_COST); 5715 5716 size(4); 5717 5718 format %{ "LDSB $mem+1,$dst\t! short -> byte" %} 5719 ins_encode %{ 5720 __ ldsb($mem$$Address, $dst$$Register, 1); 5721 %} 5722 ins_pipe(iload_mask_mem); 5723 %} 5724 5725 // Load Short (16bit signed) into a Long Register 5726 instruct loadS2L(iRegL dst, memory mem) %{ 5727 match(Set dst (ConvI2L (LoadS mem))); 5728 ins_cost(MEMORY_REF_COST); 5729 5730 size(4); 5731 format %{ "LDSH $mem,$dst\t! short -> long" %} 5732 ins_encode %{ 5733 __ ldsh($mem$$Address, $dst$$Register); 5734 %} 5735 ins_pipe(iload_mask_mem); 5736 %} 5737 5738 // Load Unsigned Short/Char (16bit UNsigned) 5739 instruct loadUS(iRegI dst, memory mem) %{ 5740 match(Set dst (LoadUS mem)); 5741 ins_cost(MEMORY_REF_COST); 5742 5743 size(4); 5744 format %{ "LDUH $mem,$dst\t! ushort/char" %} 5745 ins_encode %{ 5746 __ lduh($mem$$Address, $dst$$Register); 5747 %} 5748 ins_pipe(iload_mem); 5749 %} 5750 5751 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed) 5752 instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5753 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour)); 5754 ins_cost(MEMORY_REF_COST); 5755 5756 size(4); 5757 format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %} 5758 ins_encode %{ 5759 __ ldsb($mem$$Address, $dst$$Register, 1); 5760 %} 5761 ins_pipe(iload_mask_mem); 5762 %} 5763 5764 // Load Unsigned Short/Char (16bit UNsigned) into a Long Register 5765 instruct loadUS2L(iRegL dst, memory mem) %{ 5766 match(Set dst (ConvI2L (LoadUS mem))); 5767 ins_cost(MEMORY_REF_COST); 5768 5769 size(4); 5770 format %{ "LDUH $mem,$dst\t! 
ushort/char -> long" %} 5771 ins_encode %{ 5772 __ lduh($mem$$Address, $dst$$Register); 5773 %} 5774 ins_pipe(iload_mem); 5775 %} 5776 5777 // Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register 5778 instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{ 5779 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5780 ins_cost(MEMORY_REF_COST); 5781 5782 size(4); 5783 format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %} 5784 ins_encode %{ 5785 __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE 5786 %} 5787 ins_pipe(iload_mem); 5788 %} 5789 5790 // Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register 5791 instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{ 5792 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5793 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5794 5795 size(2*4); 5796 format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t" 5797 "AND $dst,$mask,$dst" %} 5798 ins_encode %{ 5799 Register Rdst = $dst$$Register; 5800 __ lduh($mem$$Address, Rdst); 5801 __ and3(Rdst, $mask$$constant, Rdst); 5802 %} 5803 ins_pipe(iload_mem); 5804 %} 5805 5806 // Load Unsigned Short/Char (16bit UNsigned) with a 32-bit mask into a Long Register 5807 instruct loadUS2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{ 5808 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5809 effect(TEMP dst, TEMP tmp); 5810 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); 5811 5812 format %{ "LDUH $mem,$dst\t! 
ushort/char & 32-bit mask -> long\n\t" 5813 "SET right_n_bits($mask, 16),$tmp\n\t" 5814 "AND $dst,$tmp,$dst" %} 5815 ins_encode %{ 5816 Register Rdst = $dst$$Register; 5817 Register Rtmp = $tmp$$Register; 5818 __ lduh($mem$$Address, Rdst); 5819 __ set($mask$$constant & right_n_bits(16), Rtmp); 5820 __ and3(Rdst, Rtmp, Rdst); 5821 %} 5822 ins_pipe(iload_mem); 5823 %} 5824 5825 // Load Integer 5826 instruct loadI(iRegI dst, memory mem) %{ 5827 match(Set dst (LoadI mem)); 5828 ins_cost(MEMORY_REF_COST); 5829 5830 size(4); 5831 format %{ "LDUW $mem,$dst\t! int" %} 5832 ins_encode %{ 5833 __ lduw($mem$$Address, $dst$$Register); 5834 %} 5835 ins_pipe(iload_mem); 5836 %} 5837 5838 // Load Integer to Byte (8 bit signed) 5839 instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{ 5840 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour)); 5841 ins_cost(MEMORY_REF_COST); 5842 5843 size(4); 5844 5845 format %{ "LDSB $mem+3,$dst\t! int -> byte" %} 5846 ins_encode %{ 5847 __ ldsb($mem$$Address, $dst$$Register, 3); 5848 %} 5849 ins_pipe(iload_mask_mem); 5850 %} 5851 5852 // Load Integer to Unsigned Byte (8 bit UNsigned) 5853 instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{ 5854 match(Set dst (AndI (LoadI mem) mask)); 5855 ins_cost(MEMORY_REF_COST); 5856 5857 size(4); 5858 5859 format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %} 5860 ins_encode %{ 5861 __ ldub($mem$$Address, $dst$$Register, 3); 5862 %} 5863 ins_pipe(iload_mask_mem); 5864 %} 5865 5866 // Load Integer to Short (16 bit signed) 5867 instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{ 5868 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen)); 5869 ins_cost(MEMORY_REF_COST); 5870 5871 size(4); 5872 5873 format %{ "LDSH $mem+2,$dst\t! 
int -> short" %} 5874 ins_encode %{ 5875 __ ldsh($mem$$Address, $dst$$Register, 2); 5876 %} 5877 ins_pipe(iload_mask_mem); 5878 %} 5879 5880 // Load Integer to Unsigned Short (16 bit UNsigned) 5881 instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{ 5882 match(Set dst (AndI (LoadI mem) mask)); 5883 ins_cost(MEMORY_REF_COST); 5884 5885 size(4); 5886 5887 format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %} 5888 ins_encode %{ 5889 __ lduh($mem$$Address, $dst$$Register, 2); 5890 %} 5891 ins_pipe(iload_mask_mem); 5892 %} 5893 5894 // Load Integer into a Long Register 5895 instruct loadI2L(iRegL dst, memory mem) %{ 5896 match(Set dst (ConvI2L (LoadI mem))); 5897 ins_cost(MEMORY_REF_COST); 5898 5899 size(4); 5900 format %{ "LDSW $mem,$dst\t! int -> long" %} 5901 ins_encode %{ 5902 __ ldsw($mem$$Address, $dst$$Register); 5903 %} 5904 ins_pipe(iload_mask_mem); 5905 %} 5906 5907 // Load Integer with mask 0xFF into a Long Register 5908 instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{ 5909 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5910 ins_cost(MEMORY_REF_COST); 5911 5912 size(4); 5913 format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %} 5914 ins_encode %{ 5915 __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE 5916 %} 5917 ins_pipe(iload_mem); 5918 %} 5919 5920 // Load Integer with mask 0xFFFF into a Long Register 5921 instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{ 5922 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5923 ins_cost(MEMORY_REF_COST); 5924 5925 size(4); 5926 format %{ "LDUH $mem+2,$dst\t! 
int & 0xFFFF -> long" %} 5927 ins_encode %{ 5928 __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE 5929 %} 5930 ins_pipe(iload_mem); 5931 %} 5932 5933 // Load Integer with a 12-bit mask into a Long Register 5934 instruct loadI2L_immU12(iRegL dst, memory mem, immU12 mask) %{ 5935 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5936 ins_cost(MEMORY_REF_COST + DEFAULT_COST); 5937 5938 size(2*4); 5939 format %{ "LDUW $mem,$dst\t! int & 12-bit mask -> long\n\t" 5940 "AND $dst,$mask,$dst" %} 5941 ins_encode %{ 5942 Register Rdst = $dst$$Register; 5943 __ lduw($mem$$Address, Rdst); 5944 __ and3(Rdst, $mask$$constant, Rdst); 5945 %} 5946 ins_pipe(iload_mem); 5947 %} 5948 5949 // Load Integer with a 31-bit mask into a Long Register 5950 instruct loadI2L_immU31(iRegL dst, memory mem, immU31 mask, iRegL tmp) %{ 5951 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 5952 effect(TEMP dst, TEMP tmp); 5953 ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST); 5954 5955 format %{ "LDUW $mem,$dst\t! int & 31-bit mask -> long\n\t" 5956 "SET $mask,$tmp\n\t" 5957 "AND $dst,$tmp,$dst" %} 5958 ins_encode %{ 5959 Register Rdst = $dst$$Register; 5960 Register Rtmp = $tmp$$Register; 5961 __ lduw($mem$$Address, Rdst); 5962 __ set($mask$$constant, Rtmp); 5963 __ and3(Rdst, Rtmp, Rdst); 5964 %} 5965 ins_pipe(iload_mem); 5966 %} 5967 5968 // Load Unsigned Integer into a Long Register 5969 instruct loadUI2L(iRegL dst, memory mem, immL_32bits mask) %{ 5970 match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); 5971 ins_cost(MEMORY_REF_COST); 5972 5973 size(4); 5974 format %{ "LDUW $mem,$dst\t! uint -> long" %} 5975 ins_encode %{ 5976 __ lduw($mem$$Address, $dst$$Register); 5977 %} 5978 ins_pipe(iload_mem); 5979 %} 5980 5981 // Load Long - aligned 5982 instruct loadL(iRegL dst, memory mem ) %{ 5983 match(Set dst (LoadL mem)); 5984 ins_cost(MEMORY_REF_COST); 5985 5986 size(4); 5987 format %{ "LDX $mem,$dst\t! 
long" %} 5988 ins_encode %{ 5989 __ ldx($mem$$Address, $dst$$Register); 5990 %} 5991 ins_pipe(iload_mem); 5992 %} 5993 5994 // Load Long - UNaligned 5995 instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{ 5996 match(Set dst (LoadL_unaligned mem)); 5997 effect(KILL tmp); 5998 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST); 5999 size(16); 6000 format %{ "LDUW $mem+4,R_O7\t! misaligned long\n" 6001 "\tLDUW $mem ,$dst\n" 6002 "\tSLLX #32, $dst, $dst\n" 6003 "\tOR $dst, R_O7, $dst" %} 6004 opcode(Assembler::lduw_op3); 6005 ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst )); 6006 ins_pipe(iload_mem); 6007 %} 6008 6009 // Load Range 6010 instruct loadRange(iRegI dst, memory mem) %{ 6011 match(Set dst (LoadRange mem)); 6012 ins_cost(MEMORY_REF_COST); 6013 6014 size(4); 6015 format %{ "LDUW $mem,$dst\t! range" %} 6016 opcode(Assembler::lduw_op3); 6017 ins_encode(simple_form3_mem_reg( mem, dst ) ); 6018 ins_pipe(iload_mem); 6019 %} 6020 6021 // Load Integer into %f register (for fitos/fitod) 6022 instruct loadI_freg(regF dst, memory mem) %{ 6023 match(Set dst (LoadI mem)); 6024 ins_cost(MEMORY_REF_COST); 6025 size(4); 6026 6027 format %{ "LDF $mem,$dst\t! for fitos/fitod" %} 6028 opcode(Assembler::ldf_op3); 6029 ins_encode(simple_form3_mem_reg( mem, dst ) ); 6030 ins_pipe(floadF_mem); 6031 %} 6032 6033 // Load Pointer 6034 instruct loadP(iRegP dst, memory mem) %{ 6035 match(Set dst (LoadP mem)); 6036 ins_cost(MEMORY_REF_COST); 6037 size(4); 6038 6039 #ifndef _LP64 6040 format %{ "LDUW $mem,$dst\t! ptr" %} 6041 ins_encode %{ 6042 __ lduw($mem$$Address, $dst$$Register); 6043 %} 6044 #else 6045 format %{ "LDX $mem,$dst\t! ptr" %} 6046 ins_encode %{ 6047 __ ldx($mem$$Address, $dst$$Register); 6048 %} 6049 #endif 6050 ins_pipe(iload_mem); 6051 %} 6052 6053 // Load Compressed Pointer 6054 instruct loadN(iRegN dst, memory mem) %{ 6055 match(Set dst (LoadN mem)); 6056 ins_cost(MEMORY_REF_COST); 6057 size(4); 6058 6059 format %{ "LDUW $mem,$dst\t! 
compressed ptr" %} 6060 ins_encode %{ 6061 __ lduw($mem$$Address, $dst$$Register); 6062 %} 6063 ins_pipe(iload_mem); 6064 %} 6065 6066 // Load Klass Pointer 6067 instruct loadKlass(iRegP dst, memory mem) %{ 6068 match(Set dst (LoadKlass mem)); 6069 ins_cost(MEMORY_REF_COST); 6070 size(4); 6071 6072 #ifndef _LP64 6073 format %{ "LDUW $mem,$dst\t! klass ptr" %} 6074 ins_encode %{ 6075 __ lduw($mem$$Address, $dst$$Register); 6076 %} 6077 #else 6078 format %{ "LDX $mem,$dst\t! klass ptr" %} 6079 ins_encode %{ 6080 __ ldx($mem$$Address, $dst$$Register); 6081 %} 6082 #endif 6083 ins_pipe(iload_mem); 6084 %} 6085 6086 // Load narrow Klass Pointer 6087 instruct loadNKlass(iRegN dst, memory mem) %{ 6088 match(Set dst (LoadNKlass mem)); 6089 ins_cost(MEMORY_REF_COST); 6090 size(4); 6091 6092 format %{ "LDUW $mem,$dst\t! compressed klass ptr" %} 6093 ins_encode %{ 6094 __ lduw($mem$$Address, $dst$$Register); 6095 %} 6096 ins_pipe(iload_mem); 6097 %} 6098 6099 // Load Double 6100 instruct loadD(regD dst, memory mem) %{ 6101 match(Set dst (LoadD mem)); 6102 ins_cost(MEMORY_REF_COST); 6103 6104 size(4); 6105 format %{ "LDDF $mem,$dst" %} 6106 opcode(Assembler::lddf_op3); 6107 ins_encode(simple_form3_mem_reg( mem, dst ) ); 6108 ins_pipe(floadD_mem); 6109 %} 6110 6111 // Load Double - UNaligned 6112 instruct loadD_unaligned(regD_low dst, memory mem ) %{ 6113 match(Set dst (LoadD_unaligned mem)); 6114 ins_cost(MEMORY_REF_COST*2+DEFAULT_COST); 6115 size(8); 6116 format %{ "LDF $mem ,$dst.hi\t! misaligned double\n" 6117 "\tLDF $mem+4,$dst.lo\t!" 
%} 6118 opcode(Assembler::ldf_op3); 6119 ins_encode( form3_mem_reg_double_unaligned( mem, dst )); 6120 ins_pipe(iload_mem); 6121 %} 6122 6123 // Load Float 6124 instruct loadF(regF dst, memory mem) %{ 6125 match(Set dst (LoadF mem)); 6126 ins_cost(MEMORY_REF_COST); 6127 6128 size(4); 6129 format %{ "LDF $mem,$dst" %} 6130 opcode(Assembler::ldf_op3); 6131 ins_encode(simple_form3_mem_reg( mem, dst ) ); 6132 ins_pipe(floadF_mem); 6133 %} 6134 6135 // Load Constant 6136 instruct loadConI( iRegI dst, immI src ) %{ 6137 match(Set dst src); 6138 ins_cost(DEFAULT_COST * 3/2); 6139 format %{ "SET $src,$dst" %} 6140 ins_encode( Set32(src, dst) ); 6141 ins_pipe(ialu_hi_lo_reg); 6142 %} 6143 6144 instruct loadConI13( iRegI dst, immI13 src ) %{ 6145 match(Set dst src); 6146 6147 size(4); 6148 format %{ "MOV $src,$dst" %} 6149 ins_encode( Set13( src, dst ) ); 6150 ins_pipe(ialu_imm); 6151 %} 6152 6153 #ifndef _LP64 6154 instruct loadConP(iRegP dst, immP con) %{ 6155 match(Set dst con); 6156 ins_cost(DEFAULT_COST * 3/2); 6157 format %{ "SET $con,$dst\t!ptr" %} 6158 ins_encode %{ 6159 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc(); 6160 intptr_t val = $con$$constant; 6161 if (constant_reloc == relocInfo::oop_type) { 6162 __ set_oop_constant((jobject) val, $dst$$Register); 6163 } else if (constant_reloc == relocInfo::metadata_type) { 6164 __ set_metadata_constant((Metadata*)val, $dst$$Register); 6165 } else { // non-oop pointers, e.g. card mark base, heap top 6166 assert(constant_reloc == relocInfo::none, "unexpected reloc type"); 6167 __ set(val, $dst$$Register); 6168 } 6169 %} 6170 ins_pipe(loadConP); 6171 %} 6172 #else 6173 instruct loadConP_set(iRegP dst, immP_set con) %{ 6174 match(Set dst con); 6175 ins_cost(DEFAULT_COST * 3/2); 6176 format %{ "SET $con,$dst\t! 
ptr" %} 6177 ins_encode %{ 6178 relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc(); 6179 intptr_t val = $con$$constant; 6180 if (constant_reloc == relocInfo::oop_type) { 6181 __ set_oop_constant((jobject) val, $dst$$Register); 6182 } else if (constant_reloc == relocInfo::metadata_type) { 6183 __ set_metadata_constant((Metadata*)val, $dst$$Register); 6184 } else { // non-oop pointers, e.g. card mark base, heap top 6185 assert(constant_reloc == relocInfo::none, "unexpected reloc type"); 6186 __ set(val, $dst$$Register); 6187 } 6188 %} 6189 ins_pipe(loadConP); 6190 %} 6191 6192 instruct loadConP_load(iRegP dst, immP_load con) %{ 6193 match(Set dst con); 6194 ins_cost(MEMORY_REF_COST); 6195 format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %} 6196 ins_encode %{ 6197 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register); 6198 __ ld_ptr($constanttablebase, con_offset, $dst$$Register); 6199 %} 6200 ins_pipe(loadConP); 6201 %} 6202 6203 instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{ 6204 match(Set dst con); 6205 ins_cost(DEFAULT_COST * 3/2); 6206 format %{ "SET $con,$dst\t! 
non-oop ptr" %} 6207 ins_encode %{ 6208 if (_opnds[1]->constant_reloc() == relocInfo::metadata_type) { 6209 __ set_metadata_constant((Metadata*)$con$$constant, $dst$$Register); 6210 } else { 6211 __ set($con$$constant, $dst$$Register); 6212 } 6213 %} 6214 ins_pipe(loadConP); 6215 %} 6216 #endif // _LP64 6217 6218 instruct loadConP0(iRegP dst, immP0 src) %{ 6219 match(Set dst src); 6220 6221 size(4); 6222 format %{ "CLR $dst\t!ptr" %} 6223 ins_encode %{ 6224 __ clr($dst$$Register); 6225 %} 6226 ins_pipe(ialu_imm); 6227 %} 6228 6229 instruct loadConP_poll(iRegP dst, immP_poll src) %{ 6230 match(Set dst src); 6231 ins_cost(DEFAULT_COST); 6232 format %{ "SET $src,$dst\t!ptr" %} 6233 ins_encode %{ 6234 AddressLiteral polling_page(os::get_polling_page()); 6235 __ sethi(polling_page, reg_to_register_object($dst$$reg)); 6236 %} 6237 ins_pipe(loadConP_poll); 6238 %} 6239 6240 instruct loadConN0(iRegN dst, immN0 src) %{ 6241 match(Set dst src); 6242 6243 size(4); 6244 format %{ "CLR $dst\t! compressed NULL ptr" %} 6245 ins_encode %{ 6246 __ clr($dst$$Register); 6247 %} 6248 ins_pipe(ialu_imm); 6249 %} 6250 6251 instruct loadConN(iRegN dst, immN src) %{ 6252 match(Set dst src); 6253 ins_cost(DEFAULT_COST * 3/2); 6254 format %{ "SET $src,$dst\t! compressed ptr" %} 6255 ins_encode %{ 6256 Register dst = $dst$$Register; 6257 __ set_narrow_oop((jobject)$src$$constant, dst); 6258 %} 6259 ins_pipe(ialu_hi_lo_reg); 6260 %} 6261 6262 instruct loadConNKlass(iRegN dst, immNKlass src) %{ 6263 match(Set dst src); 6264 ins_cost(DEFAULT_COST * 3/2); 6265 format %{ "SET $src,$dst\t! compressed klass ptr" %} 6266 ins_encode %{ 6267 Register dst = $dst$$Register; 6268 __ set_narrow_klass((Klass*)$src$$constant, dst); 6269 %} 6270 ins_pipe(ialu_hi_lo_reg); 6271 %} 6272 6273 // Materialize long value (predicated by immL_cheap). 
6274 instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{ 6275 match(Set dst con); 6276 effect(KILL tmp); 6277 ins_cost(DEFAULT_COST * 3); 6278 format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %} 6279 ins_encode %{ 6280 __ set64($con$$constant, $dst$$Register, $tmp$$Register); 6281 %} 6282 ins_pipe(loadConL); 6283 %} 6284 6285 // Load long value from constant table (predicated by immL_expensive). 6286 instruct loadConL_ldx(iRegL dst, immL_expensive con) %{ 6287 match(Set dst con); 6288 ins_cost(MEMORY_REF_COST); 6289 format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %} 6290 ins_encode %{ 6291 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register); 6292 __ ldx($constanttablebase, con_offset, $dst$$Register); 6293 %} 6294 ins_pipe(loadConL); 6295 %} 6296 6297 instruct loadConL0( iRegL dst, immL0 src ) %{ 6298 match(Set dst src); 6299 ins_cost(DEFAULT_COST); 6300 size(4); 6301 format %{ "CLR $dst\t! long" %} 6302 ins_encode( Set13( src, dst ) ); 6303 ins_pipe(ialu_imm); 6304 %} 6305 6306 instruct loadConL13( iRegL dst, immL13 src ) %{ 6307 match(Set dst src); 6308 ins_cost(DEFAULT_COST * 2); 6309 6310 size(4); 6311 format %{ "MOV $src,$dst\t! long" %} 6312 ins_encode( Set13( src, dst ) ); 6313 ins_pipe(ialu_imm); 6314 %} 6315 6316 instruct loadConF(regF dst, immF con, o7RegI tmp) %{ 6317 match(Set dst con); 6318 effect(KILL tmp); 6319 format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %} 6320 ins_encode %{ 6321 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register); 6322 __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister); 6323 %} 6324 ins_pipe(loadConFD); 6325 %} 6326 6327 instruct loadConD(regD dst, immD con, o7RegI tmp) %{ 6328 match(Set dst con); 6329 effect(KILL tmp); 6330 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! 
load from constant table: double=$con" %} 6331 ins_encode %{ 6332 // XXX This is a quick fix for 6833573. 6333 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister); 6334 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register); 6335 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 6336 %} 6337 ins_pipe(loadConFD); 6338 %} 6339 6340 // Prefetch instructions for allocation. 6341 // Must be safe to execute with invalid address (cannot fault). 6342 6343 instruct prefetchAlloc( memory mem ) %{ 6344 predicate(AllocatePrefetchInstr == 0); 6345 match( PrefetchAllocation mem ); 6346 ins_cost(MEMORY_REF_COST); 6347 size(4); 6348 6349 format %{ "PREFETCH $mem,2\t! Prefetch allocation" %} 6350 opcode(Assembler::prefetch_op3); 6351 ins_encode( form3_mem_prefetch_write( mem ) ); 6352 ins_pipe(iload_mem); 6353 %} 6354 6355 // Use BIS instruction to prefetch for allocation. 6356 // Could fault, need space at the end of TLAB. 6357 instruct prefetchAlloc_bis( iRegP dst ) %{ 6358 predicate(AllocatePrefetchInstr == 1); 6359 match( PrefetchAllocation dst ); 6360 ins_cost(MEMORY_REF_COST); 6361 size(4); 6362 6363 format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %} 6364 ins_encode %{ 6365 __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY); 6366 %} 6367 ins_pipe(istore_mem_reg); 6368 %} 6369 6370 // Next code is used for finding next cache line address to prefetch. 6371 #ifndef _LP64 6372 instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{ 6373 match(Set dst (CastX2P (AndI (CastP2X src) mask))); 6374 ins_cost(DEFAULT_COST); 6375 size(4); 6376 6377 format %{ "AND $src,$mask,$dst\t! 
next cache line address" %} 6378 ins_encode %{ 6379 __ and3($src$$Register, $mask$$constant, $dst$$Register); 6380 %} 6381 ins_pipe(ialu_reg_imm); 6382 %} 6383 #else 6384 instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{ 6385 match(Set dst (CastX2P (AndL (CastP2X src) mask))); 6386 ins_cost(DEFAULT_COST); 6387 size(4); 6388 6389 format %{ "AND $src,$mask,$dst\t! next cache line address" %} 6390 ins_encode %{ 6391 __ and3($src$$Register, $mask$$constant, $dst$$Register); 6392 %} 6393 ins_pipe(ialu_reg_imm); 6394 %} 6395 #endif 6396 6397 //----------Store Instructions------------------------------------------------- 6398 // Store Byte 6399 instruct storeB(memory mem, iRegI src) %{ 6400 match(Set mem (StoreB mem src)); 6401 ins_cost(MEMORY_REF_COST); 6402 6403 size(4); 6404 format %{ "STB $src,$mem\t! byte" %} 6405 opcode(Assembler::stb_op3); 6406 ins_encode(simple_form3_mem_reg( mem, src ) ); 6407 ins_pipe(istore_mem_reg); 6408 %} 6409 6410 instruct storeB0(memory mem, immI0 src) %{ 6411 match(Set mem (StoreB mem src)); 6412 ins_cost(MEMORY_REF_COST); 6413 6414 size(4); 6415 format %{ "STB $src,$mem\t! byte" %} 6416 opcode(Assembler::stb_op3); 6417 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6418 ins_pipe(istore_mem_zero); 6419 %} 6420 6421 instruct storeCM0(memory mem, immI0 src) %{ 6422 match(Set mem (StoreCM mem src)); 6423 ins_cost(MEMORY_REF_COST); 6424 6425 size(4); 6426 format %{ "STB $src,$mem\t! CMS card-mark byte 0" %} 6427 opcode(Assembler::stb_op3); 6428 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6429 ins_pipe(istore_mem_zero); 6430 %} 6431 6432 // Store Char/Short 6433 instruct storeC(memory mem, iRegI src) %{ 6434 match(Set mem (StoreC mem src)); 6435 ins_cost(MEMORY_REF_COST); 6436 6437 size(4); 6438 format %{ "STH $src,$mem\t! 
short" %} 6439 opcode(Assembler::sth_op3); 6440 ins_encode(simple_form3_mem_reg( mem, src ) ); 6441 ins_pipe(istore_mem_reg); 6442 %} 6443 6444 instruct storeC0(memory mem, immI0 src) %{ 6445 match(Set mem (StoreC mem src)); 6446 ins_cost(MEMORY_REF_COST); 6447 6448 size(4); 6449 format %{ "STH $src,$mem\t! short" %} 6450 opcode(Assembler::sth_op3); 6451 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6452 ins_pipe(istore_mem_zero); 6453 %} 6454 6455 // Store Integer 6456 instruct storeI(memory mem, iRegI src) %{ 6457 match(Set mem (StoreI mem src)); 6458 ins_cost(MEMORY_REF_COST); 6459 6460 size(4); 6461 format %{ "STW $src,$mem" %} 6462 opcode(Assembler::stw_op3); 6463 ins_encode(simple_form3_mem_reg( mem, src ) ); 6464 ins_pipe(istore_mem_reg); 6465 %} 6466 6467 // Store Long 6468 instruct storeL(memory mem, iRegL src) %{ 6469 match(Set mem (StoreL mem src)); 6470 ins_cost(MEMORY_REF_COST); 6471 size(4); 6472 format %{ "STX $src,$mem\t! long" %} 6473 opcode(Assembler::stx_op3); 6474 ins_encode(simple_form3_mem_reg( mem, src ) ); 6475 ins_pipe(istore_mem_reg); 6476 %} 6477 6478 instruct storeI0(memory mem, immI0 src) %{ 6479 match(Set mem (StoreI mem src)); 6480 ins_cost(MEMORY_REF_COST); 6481 6482 size(4); 6483 format %{ "STW $src,$mem" %} 6484 opcode(Assembler::stw_op3); 6485 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6486 ins_pipe(istore_mem_zero); 6487 %} 6488 6489 instruct storeL0(memory mem, immL0 src) %{ 6490 match(Set mem (StoreL mem src)); 6491 ins_cost(MEMORY_REF_COST); 6492 6493 size(4); 6494 format %{ "STX $src,$mem" %} 6495 opcode(Assembler::stx_op3); 6496 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6497 ins_pipe(istore_mem_zero); 6498 %} 6499 6500 // Store Integer from float register (used after fstoi) 6501 instruct storeI_Freg(memory mem, regF src) %{ 6502 match(Set mem (StoreI mem src)); 6503 ins_cost(MEMORY_REF_COST); 6504 6505 size(4); 6506 format %{ "STF $src,$mem\t! 
after fstoi/fdtoi" %} 6507 opcode(Assembler::stf_op3); 6508 ins_encode(simple_form3_mem_reg( mem, src ) ); 6509 ins_pipe(fstoreF_mem_reg); 6510 %} 6511 6512 // Store Pointer 6513 instruct storeP(memory dst, sp_ptr_RegP src) %{ 6514 match(Set dst (StoreP dst src)); 6515 ins_cost(MEMORY_REF_COST); 6516 size(4); 6517 6518 #ifndef _LP64 6519 format %{ "STW $src,$dst\t! ptr" %} 6520 opcode(Assembler::stw_op3, 0, REGP_OP); 6521 #else 6522 format %{ "STX $src,$dst\t! ptr" %} 6523 opcode(Assembler::stx_op3, 0, REGP_OP); 6524 #endif 6525 ins_encode( form3_mem_reg( dst, src ) ); 6526 ins_pipe(istore_mem_spORreg); 6527 %} 6528 6529 instruct storeP0(memory dst, immP0 src) %{ 6530 match(Set dst (StoreP dst src)); 6531 ins_cost(MEMORY_REF_COST); 6532 size(4); 6533 6534 #ifndef _LP64 6535 format %{ "STW $src,$dst\t! ptr" %} 6536 opcode(Assembler::stw_op3, 0, REGP_OP); 6537 #else 6538 format %{ "STX $src,$dst\t! ptr" %} 6539 opcode(Assembler::stx_op3, 0, REGP_OP); 6540 #endif 6541 ins_encode( form3_mem_reg( dst, R_G0 ) ); 6542 ins_pipe(istore_mem_zero); 6543 %} 6544 6545 // Store Compressed Pointer 6546 instruct storeN(memory dst, iRegN src) %{ 6547 match(Set dst (StoreN dst src)); 6548 ins_cost(MEMORY_REF_COST); 6549 size(4); 6550 6551 format %{ "STW $src,$dst\t! compressed ptr" %} 6552 ins_encode %{ 6553 Register base = as_Register($dst$$base); 6554 Register index = as_Register($dst$$index); 6555 Register src = $src$$Register; 6556 if (index != G0) { 6557 __ stw(src, base, index); 6558 } else { 6559 __ stw(src, base, $dst$$disp); 6560 } 6561 %} 6562 ins_pipe(istore_mem_spORreg); 6563 %} 6564 6565 instruct storeNKlass(memory dst, iRegN src) %{ 6566 match(Set dst (StoreNKlass dst src)); 6567 ins_cost(MEMORY_REF_COST); 6568 size(4); 6569 6570 format %{ "STW $src,$dst\t! 
compressed klass ptr" %} 6571 ins_encode %{ 6572 Register base = as_Register($dst$$base); 6573 Register index = as_Register($dst$$index); 6574 Register src = $src$$Register; 6575 if (index != G0) { 6576 __ stw(src, base, index); 6577 } else { 6578 __ stw(src, base, $dst$$disp); 6579 } 6580 %} 6581 ins_pipe(istore_mem_spORreg); 6582 %} 6583 6584 instruct storeN0(memory dst, immN0 src) %{ 6585 match(Set dst (StoreN dst src)); 6586 ins_cost(MEMORY_REF_COST); 6587 size(4); 6588 6589 format %{ "STW $src,$dst\t! compressed ptr" %} 6590 ins_encode %{ 6591 Register base = as_Register($dst$$base); 6592 Register index = as_Register($dst$$index); 6593 if (index != G0) { 6594 __ stw(0, base, index); 6595 } else { 6596 __ stw(0, base, $dst$$disp); 6597 } 6598 %} 6599 ins_pipe(istore_mem_zero); 6600 %} 6601 6602 // Store Double 6603 instruct storeD( memory mem, regD src) %{ 6604 match(Set mem (StoreD mem src)); 6605 ins_cost(MEMORY_REF_COST); 6606 6607 size(4); 6608 format %{ "STDF $src,$mem" %} 6609 opcode(Assembler::stdf_op3); 6610 ins_encode(simple_form3_mem_reg( mem, src ) ); 6611 ins_pipe(fstoreD_mem_reg); 6612 %} 6613 6614 instruct storeD0( memory mem, immD0 src) %{ 6615 match(Set mem (StoreD mem src)); 6616 ins_cost(MEMORY_REF_COST); 6617 6618 size(4); 6619 format %{ "STX $src,$mem" %} 6620 opcode(Assembler::stx_op3); 6621 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6622 ins_pipe(fstoreD_mem_zero); 6623 %} 6624 6625 // Store Float 6626 instruct storeF( memory mem, regF src) %{ 6627 match(Set mem (StoreF mem src)); 6628 ins_cost(MEMORY_REF_COST); 6629 6630 size(4); 6631 format %{ "STF $src,$mem" %} 6632 opcode(Assembler::stf_op3); 6633 ins_encode(simple_form3_mem_reg( mem, src ) ); 6634 ins_pipe(fstoreF_mem_reg); 6635 %} 6636 6637 instruct storeF0( memory mem, immF0 src) %{ 6638 match(Set mem (StoreF mem src)); 6639 ins_cost(MEMORY_REF_COST); 6640 6641 size(4); 6642 format %{ "STW $src,$mem\t! 
storeF0" %} 6643 opcode(Assembler::stw_op3); 6644 ins_encode(simple_form3_mem_reg( mem, R_G0 ) ); 6645 ins_pipe(fstoreF_mem_zero); 6646 %} 6647 6648 // Convert oop pointer into compressed form 6649 instruct encodeHeapOop(iRegN dst, iRegP src) %{ 6650 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull); 6651 match(Set dst (EncodeP src)); 6652 format %{ "encode_heap_oop $src, $dst" %} 6653 ins_encode %{ 6654 __ encode_heap_oop($src$$Register, $dst$$Register); 6655 %} 6656 ins_avoid_back_to_back(Universe::narrow_oop_base() == NULL ? AVOID_NONE : AVOID_BEFORE); 6657 ins_pipe(ialu_reg); 6658 %} 6659 6660 instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{ 6661 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull); 6662 match(Set dst (EncodeP src)); 6663 format %{ "encode_heap_oop_not_null $src, $dst" %} 6664 ins_encode %{ 6665 __ encode_heap_oop_not_null($src$$Register, $dst$$Register); 6666 %} 6667 ins_pipe(ialu_reg); 6668 %} 6669 6670 instruct decodeHeapOop(iRegP dst, iRegN src) %{ 6671 predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull && 6672 n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant); 6673 match(Set dst (DecodeN src)); 6674 format %{ "decode_heap_oop $src, $dst" %} 6675 ins_encode %{ 6676 __ decode_heap_oop($src$$Register, $dst$$Register); 6677 %} 6678 ins_pipe(ialu_reg); 6679 %} 6680 6681 instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{ 6682 predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull || 6683 n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant); 6684 match(Set dst (DecodeN src)); 6685 format %{ "decode_heap_oop_not_null $src, $dst" %} 6686 ins_encode %{ 6687 __ decode_heap_oop_not_null($src$$Register, $dst$$Register); 6688 %} 6689 ins_pipe(ialu_reg); 6690 %} 6691 6692 instruct encodeKlass_not_null(iRegN dst, iRegP src) %{ 6693 match(Set dst (EncodePKlass src)); 6694 format %{ "encode_klass_not_null $src, $dst" %} 6695 ins_encode %{ 6696 __ 
encode_klass_not_null($src$$Register, $dst$$Register); 6697 %} 6698 ins_pipe(ialu_reg); 6699 %} 6700 6701 instruct decodeKlass_not_null(iRegP dst, iRegN src) %{ 6702 match(Set dst (DecodeNKlass src)); 6703 format %{ "decode_klass_not_null $src, $dst" %} 6704 ins_encode %{ 6705 __ decode_klass_not_null($src$$Register, $dst$$Register); 6706 %} 6707 ins_pipe(ialu_reg); 6708 %} 6709 6710 //----------MemBar Instructions----------------------------------------------- 6711 // Memory barrier flavors 6712 6713 instruct membar_acquire() %{ 6714 match(MemBarAcquire); 6715 match(LoadFence); 6716 ins_cost(4*MEMORY_REF_COST); 6717 6718 size(0); 6719 format %{ "MEMBAR-acquire" %} 6720 ins_encode( enc_membar_acquire ); 6721 ins_pipe(long_memory_op); 6722 %} 6723 6724 instruct membar_acquire_lock() %{ 6725 match(MemBarAcquireLock); 6726 ins_cost(0); 6727 6728 size(0); 6729 format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %} 6730 ins_encode( ); 6731 ins_pipe(empty); 6732 %} 6733 6734 instruct membar_release() %{ 6735 match(MemBarRelease); 6736 match(StoreFence); 6737 ins_cost(4*MEMORY_REF_COST); 6738 6739 size(0); 6740 format %{ "MEMBAR-release" %} 6741 ins_encode( enc_membar_release ); 6742 ins_pipe(long_memory_op); 6743 %} 6744 6745 instruct membar_release_lock() %{ 6746 match(MemBarReleaseLock); 6747 ins_cost(0); 6748 6749 size(0); 6750 format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %} 6751 ins_encode( ); 6752 ins_pipe(empty); 6753 %} 6754 6755 instruct membar_volatile() %{ 6756 match(MemBarVolatile); 6757 ins_cost(4*MEMORY_REF_COST); 6758 6759 size(4); 6760 format %{ "MEMBAR-volatile" %} 6761 ins_encode( enc_membar_volatile ); 6762 ins_pipe(long_memory_op); 6763 %} 6764 6765 instruct unnecessary_membar_volatile() %{ 6766 match(MemBarVolatile); 6767 predicate(Matcher::post_store_load_barrier(n)); 6768 ins_cost(0); 6769 6770 size(0); 6771 format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %} 6772 ins_encode( ); 6773 
ins_pipe(empty); 6774 %} 6775 6776 instruct membar_storestore() %{ 6777 match(MemBarStoreStore); 6778 ins_cost(0); 6779 6780 size(0); 6781 format %{ "!MEMBAR-storestore (empty encoding)" %} 6782 ins_encode( ); 6783 ins_pipe(empty); 6784 %} 6785 6786 //----------Register Move Instructions----------------------------------------- 6787 instruct roundDouble_nop(regD dst) %{ 6788 match(Set dst (RoundDouble dst)); 6789 ins_cost(0); 6790 // SPARC results are already "rounded" (i.e., normal-format IEEE) 6791 ins_encode( ); 6792 ins_pipe(empty); 6793 %} 6794 6795 6796 instruct roundFloat_nop(regF dst) %{ 6797 match(Set dst (RoundFloat dst)); 6798 ins_cost(0); 6799 // SPARC results are already "rounded" (i.e., normal-format IEEE) 6800 ins_encode( ); 6801 ins_pipe(empty); 6802 %} 6803 6804 6805 // Cast Index to Pointer for unsafe natives 6806 instruct castX2P(iRegX src, iRegP dst) %{ 6807 match(Set dst (CastX2P src)); 6808 6809 format %{ "MOV $src,$dst\t! IntX->Ptr" %} 6810 ins_encode( form3_g0_rs2_rd_move( src, dst ) ); 6811 ins_pipe(ialu_reg); 6812 %} 6813 6814 // Cast Pointer to Index for unsafe natives 6815 instruct castP2X(iRegP src, iRegX dst) %{ 6816 match(Set dst (CastP2X src)); 6817 6818 format %{ "MOV $src,$dst\t! Ptr->IntX" %} 6819 ins_encode( form3_g0_rs2_rd_move( src, dst ) ); 6820 ins_pipe(ialu_reg); 6821 %} 6822 6823 instruct stfSSD(stackSlotD stkSlot, regD src) %{ 6824 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 6825 match(Set stkSlot src); // chain rule 6826 ins_cost(MEMORY_REF_COST); 6827 format %{ "STDF $src,$stkSlot\t!stk" %} 6828 opcode(Assembler::stdf_op3); 6829 ins_encode(simple_form3_mem_reg(stkSlot, src)); 6830 ins_pipe(fstoreD_stk_reg); 6831 %} 6832 6833 instruct ldfSSD(regD dst, stackSlotD stkSlot) %{ 6834 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 
6835 match(Set dst stkSlot); // chain rule 6836 ins_cost(MEMORY_REF_COST); 6837 format %{ "LDDF $stkSlot,$dst\t!stk" %} 6838 opcode(Assembler::lddf_op3); 6839 ins_encode(simple_form3_mem_reg(stkSlot, dst)); 6840 ins_pipe(floadD_stk); 6841 %} 6842 6843 instruct stfSSF(stackSlotF stkSlot, regF src) %{ 6844 // %%%% TO DO: Tell the coalescer that this kind of node is a copy! 6845 match(Set stkSlot src); // chain rule 6846 ins_cost(MEMORY_REF_COST); 6847 format %{ "STF $src,$stkSlot\t!stk" %} 6848 opcode(Assembler::stf_op3); 6849 ins_encode(simple_form3_mem_reg(stkSlot, src)); 6850 ins_pipe(fstoreF_stk_reg); 6851 %} 6852 6853 //----------Conditional Move--------------------------------------------------- 6854 // Conditional move 6855 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{ 6856 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6857 ins_cost(150); 6858 format %{ "MOV$cmp $pcc,$src,$dst" %} 6859 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6860 ins_pipe(ialu_reg); 6861 %} 6862 6863 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{ 6864 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src))); 6865 ins_cost(140); 6866 format %{ "MOV$cmp $pcc,$src,$dst" %} 6867 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6868 ins_pipe(ialu_imm); 6869 %} 6870 6871 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{ 6872 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6873 ins_cost(150); 6874 size(4); 6875 format %{ "MOV$cmp $icc,$src,$dst" %} 6876 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6877 ins_pipe(ialu_reg); 6878 %} 6879 6880 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{ 6881 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6882 ins_cost(140); 6883 size(4); 6884 format %{ "MOV$cmp $icc,$src,$dst" %} 6885 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6886 ins_pipe(ialu_imm); 6887 %} 6888 6889 
instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{ 6890 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6891 ins_cost(150); 6892 size(4); 6893 format %{ "MOV$cmp $icc,$src,$dst" %} 6894 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6895 ins_pipe(ialu_reg); 6896 %} 6897 6898 instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{ 6899 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src))); 6900 ins_cost(140); 6901 size(4); 6902 format %{ "MOV$cmp $icc,$src,$dst" %} 6903 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 6904 ins_pipe(ialu_imm); 6905 %} 6906 6907 instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{ 6908 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src))); 6909 ins_cost(150); 6910 size(4); 6911 format %{ "MOV$cmp $fcc,$src,$dst" %} 6912 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6913 ins_pipe(ialu_reg); 6914 %} 6915 6916 instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{ 6917 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src))); 6918 ins_cost(140); 6919 size(4); 6920 format %{ "MOV$cmp $fcc,$src,$dst" %} 6921 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) ); 6922 ins_pipe(ialu_imm); 6923 %} 6924 6925 // Conditional move for RegN. Only cmov(reg,reg). 6926 instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{ 6927 match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src))); 6928 ins_cost(150); 6929 format %{ "MOV$cmp $pcc,$src,$dst" %} 6930 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6931 ins_pipe(ialu_reg); 6932 %} 6933 6934 // This instruction also works with CmpN so we don't need cmovNN_reg. 
6935 instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{ 6936 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); 6937 ins_cost(150); 6938 size(4); 6939 format %{ "MOV$cmp $icc,$src,$dst" %} 6940 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6941 ins_pipe(ialu_reg); 6942 %} 6943 6944 // This instruction also works with CmpN so we don't need cmovNN_reg. 6945 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{ 6946 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src))); 6947 ins_cost(150); 6948 size(4); 6949 format %{ "MOV$cmp $icc,$src,$dst" %} 6950 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6951 ins_pipe(ialu_reg); 6952 %} 6953 6954 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{ 6955 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src))); 6956 ins_cost(150); 6957 size(4); 6958 format %{ "MOV$cmp $fcc,$src,$dst" %} 6959 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 6960 ins_pipe(ialu_reg); 6961 %} 6962 6963 // Conditional move 6964 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{ 6965 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6966 ins_cost(150); 6967 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6968 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 6969 ins_pipe(ialu_reg); 6970 %} 6971 6972 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{ 6973 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src))); 6974 ins_cost(140); 6975 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %} 6976 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 6977 ins_pipe(ialu_imm); 6978 %} 6979 6980 // This instruction also works with CmpN so we don't need cmovPN_reg. 6981 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{ 6982 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6983 ins_cost(150); 6984 6985 size(4); 6986 format %{ "MOV$cmp $icc,$src,$dst\t! 
ptr" %} 6987 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6988 ins_pipe(ialu_reg); 6989 %} 6990 6991 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{ 6992 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 6993 ins_cost(150); 6994 6995 size(4); 6996 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 6997 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 6998 ins_pipe(ialu_reg); 6999 %} 7000 7001 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{ 7002 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 7003 ins_cost(140); 7004 7005 size(4); 7006 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 7007 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 7008 ins_pipe(ialu_imm); 7009 %} 7010 7011 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{ 7012 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src))); 7013 ins_cost(140); 7014 7015 size(4); 7016 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %} 7017 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) ); 7018 ins_pipe(ialu_imm); 7019 %} 7020 7021 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{ 7022 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src))); 7023 ins_cost(150); 7024 size(4); 7025 format %{ "MOV$cmp $fcc,$src,$dst" %} 7026 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 7027 ins_pipe(ialu_imm); 7028 %} 7029 7030 instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{ 7031 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src))); 7032 ins_cost(140); 7033 size(4); 7034 format %{ "MOV$cmp $fcc,$src,$dst" %} 7035 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) ); 7036 ins_pipe(ialu_imm); 7037 %} 7038 7039 // Conditional move 7040 instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{ 7041 match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src))); 7042 ins_cost(150); 7043 opcode(0x101); 7044 format %{ "FMOVD$cmp $pcc,$src,$dst" %} 7045 ins_encode( 
enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 7046 ins_pipe(int_conditional_float_move); 7047 %} 7048 7049 instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{ 7050 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src))); 7051 ins_cost(150); 7052 7053 size(4); 7054 format %{ "FMOVS$cmp $icc,$src,$dst" %} 7055 opcode(0x101); 7056 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7057 ins_pipe(int_conditional_float_move); 7058 %} 7059 7060 instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{ 7061 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src))); 7062 ins_cost(150); 7063 7064 size(4); 7065 format %{ "FMOVS$cmp $icc,$src,$dst" %} 7066 opcode(0x101); 7067 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7068 ins_pipe(int_conditional_float_move); 7069 %} 7070 7071 // Conditional move, 7072 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{ 7073 match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src))); 7074 ins_cost(150); 7075 size(4); 7076 format %{ "FMOVF$cmp $fcc,$src,$dst" %} 7077 opcode(0x1); 7078 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) ); 7079 ins_pipe(int_conditional_double_move); 7080 %} 7081 7082 // Conditional move 7083 instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{ 7084 match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src))); 7085 ins_cost(150); 7086 size(4); 7087 opcode(0x102); 7088 format %{ "FMOVD$cmp $pcc,$src,$dst" %} 7089 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 7090 ins_pipe(int_conditional_double_move); 7091 %} 7092 7093 instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{ 7094 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src))); 7095 ins_cost(150); 7096 7097 size(4); 7098 format %{ "FMOVD$cmp $icc,$src,$dst" %} 7099 opcode(0x102); 7100 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7101 ins_pipe(int_conditional_double_move); 7102 %} 7103 7104 instruct cmovDIu_reg(cmpOpU cmp, 
flagsRegU icc, regD dst, regD src) %{ 7105 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src))); 7106 ins_cost(150); 7107 7108 size(4); 7109 format %{ "FMOVD$cmp $icc,$src,$dst" %} 7110 opcode(0x102); 7111 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) ); 7112 ins_pipe(int_conditional_double_move); 7113 %} 7114 7115 // Conditional move, 7116 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{ 7117 match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src))); 7118 ins_cost(150); 7119 size(4); 7120 format %{ "FMOVD$cmp $fcc,$src,$dst" %} 7121 opcode(0x2); 7122 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) ); 7123 ins_pipe(int_conditional_double_move); 7124 %} 7125 7126 // Conditional move 7127 instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{ 7128 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src))); 7129 ins_cost(150); 7130 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %} 7131 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) ); 7132 ins_pipe(ialu_reg); 7133 %} 7134 7135 instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{ 7136 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src))); 7137 ins_cost(140); 7138 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %} 7139 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) ); 7140 ins_pipe(ialu_imm); 7141 %} 7142 7143 instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{ 7144 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src))); 7145 ins_cost(150); 7146 7147 size(4); 7148 format %{ "MOV$cmp $icc,$src,$dst\t! long" %} 7149 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 7150 ins_pipe(ialu_reg); 7151 %} 7152 7153 7154 instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{ 7155 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src))); 7156 ins_cost(150); 7157 7158 size(4); 7159 format %{ "MOV$cmp $icc,$src,$dst\t! 
long" %} 7160 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) ); 7161 ins_pipe(ialu_reg); 7162 %} 7163 7164 7165 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{ 7166 match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src))); 7167 ins_cost(150); 7168 7169 size(4); 7170 format %{ "MOV$cmp $fcc,$src,$dst\t! long" %} 7171 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) ); 7172 ins_pipe(ialu_reg); 7173 %} 7174 7175 7176 7177 //----------OS and Locking Instructions---------------------------------------- 7178 7179 // This name is KNOWN by the ADLC and cannot be changed. 7180 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type 7181 // for this guy. 7182 instruct tlsLoadP(g2RegP dst) %{ 7183 match(Set dst (ThreadLocal)); 7184 7185 size(0); 7186 ins_cost(0); 7187 format %{ "# TLS is in G2" %} 7188 ins_encode( /*empty encoding*/ ); 7189 ins_pipe(ialu_none); 7190 %} 7191 7192 instruct checkCastPP( iRegP dst ) %{ 7193 match(Set dst (CheckCastPP dst)); 7194 7195 size(0); 7196 format %{ "# checkcastPP of $dst" %} 7197 ins_encode( /*empty encoding*/ ); 7198 ins_pipe(empty); 7199 %} 7200 7201 7202 instruct castPP( iRegP dst ) %{ 7203 match(Set dst (CastPP dst)); 7204 format %{ "# castPP of $dst" %} 7205 ins_encode( /*empty encoding*/ ); 7206 ins_pipe(empty); 7207 %} 7208 7209 instruct castII( iRegI dst ) %{ 7210 match(Set dst (CastII dst)); 7211 format %{ "# castII of $dst" %} 7212 ins_encode( /*empty encoding*/ ); 7213 ins_cost(0); 7214 ins_pipe(empty); 7215 %} 7216 7217 //----------Arithmetic Instructions-------------------------------------------- 7218 // Addition Instructions 7219 // Register Addition 7220 instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7221 match(Set dst (AddI src1 src2)); 7222 7223 size(4); 7224 format %{ "ADD $src1,$src2,$dst" %} 7225 ins_encode %{ 7226 __ add($src1$$Register, $src2$$Register, $dst$$Register); 7227 %} 7228 ins_pipe(ialu_reg_reg); 7229 %} 7230 7231 // Immediate Addition 7232 instruct 
addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7233 match(Set dst (AddI src1 src2)); 7234 7235 size(4); 7236 format %{ "ADD $src1,$src2,$dst" %} 7237 opcode(Assembler::add_op3, Assembler::arith_op); 7238 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7239 ins_pipe(ialu_reg_imm); 7240 %} 7241 7242 // Pointer Register Addition 7243 instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{ 7244 match(Set dst (AddP src1 src2)); 7245 7246 size(4); 7247 format %{ "ADD $src1,$src2,$dst" %} 7248 opcode(Assembler::add_op3, Assembler::arith_op); 7249 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7250 ins_pipe(ialu_reg_reg); 7251 %} 7252 7253 // Pointer Immediate Addition 7254 instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{ 7255 match(Set dst (AddP src1 src2)); 7256 7257 size(4); 7258 format %{ "ADD $src1,$src2,$dst" %} 7259 opcode(Assembler::add_op3, Assembler::arith_op); 7260 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7261 ins_pipe(ialu_reg_imm); 7262 %} 7263 7264 // Long Addition 7265 instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7266 match(Set dst (AddL src1 src2)); 7267 7268 size(4); 7269 format %{ "ADD $src1,$src2,$dst\t! long" %} 7270 opcode(Assembler::add_op3, Assembler::arith_op); 7271 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7272 ins_pipe(ialu_reg_reg); 7273 %} 7274 7275 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7276 match(Set dst (AddL src1 con)); 7277 7278 size(4); 7279 format %{ "ADD $src1,$con,$dst" %} 7280 opcode(Assembler::add_op3, Assembler::arith_op); 7281 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7282 ins_pipe(ialu_reg_imm); 7283 %} 7284 7285 //----------Conditional_store-------------------------------------------------- 7286 // Conditional-store of the updated heap-top. 7287 // Used during allocation of the shared heap. 7288 // Sets flags (EQ) on success. Implemented with a CASA on Sparc. 7289 7290 // LoadP-locked. 
Same as a regular pointer load when used with a compare-swap 7291 instruct loadPLocked(iRegP dst, memory mem) %{ 7292 match(Set dst (LoadPLocked mem)); 7293 ins_cost(MEMORY_REF_COST); 7294 7295 #ifndef _LP64 7296 size(4); 7297 format %{ "LDUW $mem,$dst\t! ptr" %} 7298 opcode(Assembler::lduw_op3, 0, REGP_OP); 7299 #else 7300 format %{ "LDX $mem,$dst\t! ptr" %} 7301 opcode(Assembler::ldx_op3, 0, REGP_OP); 7302 #endif 7303 ins_encode( form3_mem_reg( mem, dst ) ); 7304 ins_pipe(iload_mem); 7305 %} 7306 7307 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{ 7308 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval))); 7309 effect( KILL newval ); 7310 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t" 7311 "CMP R_G3,$oldval\t\t! See if we made progress" %} 7312 ins_encode( enc_cas(heap_top_ptr,oldval,newval) ); 7313 ins_pipe( long_memory_op ); 7314 %} 7315 7316 // Conditional-store of an int value. 7317 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{ 7318 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval))); 7319 effect( KILL newval ); 7320 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" 7321 "CMP $oldval,$newval\t\t! See if we made progress" %} 7322 ins_encode( enc_cas(mem_ptr,oldval,newval) ); 7323 ins_pipe( long_memory_op ); 7324 %} 7325 7326 // Conditional-store of a long value. 7327 instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{ 7328 match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval))); 7329 effect( KILL newval ); 7330 format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t" 7331 "CMP $oldval,$newval\t\t! 
See if we made progress" %} 7332 ins_encode( enc_cas(mem_ptr,oldval,newval) ); 7333 ins_pipe( long_memory_op ); 7334 %} 7335 7336 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them 7337 7338 instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7339 predicate(VM_Version::supports_cx8()); 7340 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval))); 7341 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7342 format %{ 7343 "MOV $newval,O7\n\t" 7344 "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7345 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7346 "MOV 1,$res\n\t" 7347 "MOVne xcc,R_G0,$res" 7348 %} 7349 ins_encode( enc_casx(mem_ptr, oldval, newval), 7350 enc_lflags_ne_to_boolean(res) ); 7351 ins_pipe( long_memory_op ); 7352 %} 7353 7354 7355 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7356 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval))); 7357 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7358 format %{ 7359 "MOV $newval,O7\n\t" 7360 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7361 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7362 "MOV 1,$res\n\t" 7363 "MOVne icc,R_G0,$res" 7364 %} 7365 ins_encode( enc_casi(mem_ptr, oldval, newval), 7366 enc_iflags_ne_to_boolean(res) ); 7367 ins_pipe( long_memory_op ); 7368 %} 7369 7370 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7371 #ifdef _LP64 7372 predicate(VM_Version::supports_cx8()); 7373 #endif 7374 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval))); 7375 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7376 format %{ 7377 "MOV $newval,O7\n\t" 7378 "CASA_PTR [$mem_ptr],$oldval,O7\t! 
If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7379 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7380 "MOV 1,$res\n\t" 7381 "MOVne xcc,R_G0,$res" 7382 %} 7383 #ifdef _LP64 7384 ins_encode( enc_casx(mem_ptr, oldval, newval), 7385 enc_lflags_ne_to_boolean(res) ); 7386 #else 7387 ins_encode( enc_casi(mem_ptr, oldval, newval), 7388 enc_iflags_ne_to_boolean(res) ); 7389 #endif 7390 ins_pipe( long_memory_op ); 7391 %} 7392 7393 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{ 7394 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval))); 7395 effect( USE mem_ptr, KILL ccr, KILL tmp1); 7396 format %{ 7397 "MOV $newval,O7\n\t" 7398 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t" 7399 "CMP $oldval,O7\t\t! See if we made progress\n\t" 7400 "MOV 1,$res\n\t" 7401 "MOVne icc,R_G0,$res" 7402 %} 7403 ins_encode( enc_casi(mem_ptr, oldval, newval), 7404 enc_iflags_ne_to_boolean(res) ); 7405 ins_pipe( long_memory_op ); 7406 %} 7407 7408 instruct xchgI( memory mem, iRegI newval) %{ 7409 match(Set newval (GetAndSetI mem newval)); 7410 format %{ "SWAP [$mem],$newval" %} 7411 size(4); 7412 ins_encode %{ 7413 __ swap($mem$$Address, $newval$$Register); 7414 %} 7415 ins_pipe( long_memory_op ); 7416 %} 7417 7418 #ifndef _LP64 7419 instruct xchgP( memory mem, iRegP newval) %{ 7420 match(Set newval (GetAndSetP mem newval)); 7421 format %{ "SWAP [$mem],$newval" %} 7422 size(4); 7423 ins_encode %{ 7424 __ swap($mem$$Address, $newval$$Register); 7425 %} 7426 ins_pipe( long_memory_op ); 7427 %} 7428 #endif 7429 7430 instruct xchgN( memory mem, iRegN newval) %{ 7431 match(Set newval (GetAndSetN mem newval)); 7432 format %{ "SWAP [$mem],$newval" %} 7433 size(4); 7434 ins_encode %{ 7435 __ swap($mem$$Address, $newval$$Register); 7436 %} 7437 ins_pipe( long_memory_op ); 7438 %} 7439 7440 //--------------------- 
7441 // Subtraction Instructions 7442 // Register Subtraction 7443 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7444 match(Set dst (SubI src1 src2)); 7445 7446 size(4); 7447 format %{ "SUB $src1,$src2,$dst" %} 7448 opcode(Assembler::sub_op3, Assembler::arith_op); 7449 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7450 ins_pipe(ialu_reg_reg); 7451 %} 7452 7453 // Immediate Subtraction 7454 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7455 match(Set dst (SubI src1 src2)); 7456 7457 size(4); 7458 format %{ "SUB $src1,$src2,$dst" %} 7459 opcode(Assembler::sub_op3, Assembler::arith_op); 7460 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7461 ins_pipe(ialu_reg_imm); 7462 %} 7463 7464 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{ 7465 match(Set dst (SubI zero src2)); 7466 7467 size(4); 7468 format %{ "NEG $src2,$dst" %} 7469 opcode(Assembler::sub_op3, Assembler::arith_op); 7470 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7471 ins_pipe(ialu_zero_reg); 7472 %} 7473 7474 // Long subtraction 7475 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7476 match(Set dst (SubL src1 src2)); 7477 7478 size(4); 7479 format %{ "SUB $src1,$src2,$dst\t! long" %} 7480 opcode(Assembler::sub_op3, Assembler::arith_op); 7481 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7482 ins_pipe(ialu_reg_reg); 7483 %} 7484 7485 // Immediate Subtraction 7486 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 7487 match(Set dst (SubL src1 con)); 7488 7489 size(4); 7490 format %{ "SUB $src1,$con,$dst\t! long" %} 7491 opcode(Assembler::sub_op3, Assembler::arith_op); 7492 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 7493 ins_pipe(ialu_reg_imm); 7494 %} 7495 7496 // Long negation 7497 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{ 7498 match(Set dst (SubL zero src2)); 7499 7500 size(4); 7501 format %{ "NEG $src2,$dst\t! 
long" %} 7502 opcode(Assembler::sub_op3, Assembler::arith_op); 7503 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) ); 7504 ins_pipe(ialu_zero_reg); 7505 %} 7506 7507 // Multiplication Instructions 7508 // Integer Multiplication 7509 // Register Multiplication 7510 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7511 match(Set dst (MulI src1 src2)); 7512 7513 size(4); 7514 format %{ "MULX $src1,$src2,$dst" %} 7515 opcode(Assembler::mulx_op3, Assembler::arith_op); 7516 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7517 ins_pipe(imul_reg_reg); 7518 %} 7519 7520 // Immediate Multiplication 7521 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 7522 match(Set dst (MulI src1 src2)); 7523 7524 size(4); 7525 format %{ "MULX $src1,$src2,$dst" %} 7526 opcode(Assembler::mulx_op3, Assembler::arith_op); 7527 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7528 ins_pipe(imul_reg_imm); 7529 %} 7530 7531 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7532 match(Set dst (MulL src1 src2)); 7533 ins_cost(DEFAULT_COST * 5); 7534 size(4); 7535 format %{ "MULX $src1,$src2,$dst\t! 
long" %} 7536 opcode(Assembler::mulx_op3, Assembler::arith_op); 7537 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7538 ins_pipe(mulL_reg_reg); 7539 %} 7540 7541 // Immediate Multiplication 7542 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7543 match(Set dst (MulL src1 src2)); 7544 ins_cost(DEFAULT_COST * 5); 7545 size(4); 7546 format %{ "MULX $src1,$src2,$dst" %} 7547 opcode(Assembler::mulx_op3, Assembler::arith_op); 7548 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7549 ins_pipe(mulL_reg_imm); 7550 %} 7551 7552 // Integer Division 7553 // Register Division 7554 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{ 7555 match(Set dst (DivI src1 src2)); 7556 ins_cost((2+71)*DEFAULT_COST); 7557 7558 format %{ "SRA $src2,0,$src2\n\t" 7559 "SRA $src1,0,$src1\n\t" 7560 "SDIVX $src1,$src2,$dst" %} 7561 ins_encode( idiv_reg( src1, src2, dst ) ); 7562 ins_pipe(sdiv_reg_reg); 7563 %} 7564 7565 // Immediate Division 7566 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{ 7567 match(Set dst (DivI src1 src2)); 7568 ins_cost((2+71)*DEFAULT_COST); 7569 7570 format %{ "SRA $src1,0,$src1\n\t" 7571 "SDIVX $src1,$src2,$dst" %} 7572 ins_encode( idiv_imm( src1, src2, dst ) ); 7573 ins_pipe(sdiv_reg_imm); 7574 %} 7575 7576 //----------Div-By-10-Expansion------------------------------------------------ 7577 // Extract hi bits of a 32x32->64 bit multiply. 7578 // Expand rule only, not matched 7579 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{ 7580 effect( DEF dst, USE src1, USE src2 ); 7581 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t" 7582 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %} 7583 ins_encode( enc_mul_hi(dst,src1,src2)); 7584 ins_pipe(sdiv_reg_reg); 7585 %} 7586 7587 // Magic constant, reciprocal of 10 7588 instruct loadConI_x66666667(iRegIsafe dst) %{ 7589 effect( DEF dst ); 7590 7591 size(8); 7592 format %{ "SET 0x66666667,$dst\t! 
Used in div-by-10" %} 7593 ins_encode( Set32(0x66666667, dst) ); 7594 ins_pipe(ialu_hi_lo_reg); 7595 %} 7596 7597 // Register Shift Right Arithmetic Long by 32-63 7598 instruct sra_31( iRegI dst, iRegI src ) %{ 7599 effect( DEF dst, USE src ); 7600 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %} 7601 ins_encode( form3_rs1_rd_copysign_hi(src,dst) ); 7602 ins_pipe(ialu_reg_reg); 7603 %} 7604 7605 // Arithmetic Shift Right by 8-bit immediate 7606 instruct sra_reg_2( iRegI dst, iRegI src ) %{ 7607 effect( DEF dst, USE src ); 7608 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %} 7609 opcode(Assembler::sra_op3, Assembler::arith_op); 7610 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) ); 7611 ins_pipe(ialu_reg_imm); 7612 %} 7613 7614 // Integer DIV with 10 7615 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{ 7616 match(Set dst (DivI src div)); 7617 ins_cost((6+6)*DEFAULT_COST); 7618 expand %{ 7619 iRegIsafe tmp1; // Killed temps; 7620 iRegIsafe tmp2; // Killed temps; 7621 iRegI tmp3; // Killed temps; 7622 iRegI tmp4; // Killed temps; 7623 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1 7624 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2 7625 sra_31( tmp3, src ); // SRA src,31 -> tmp3 7626 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4 7627 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst 7628 %} 7629 %} 7630 7631 // Register Long Division 7632 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7633 match(Set dst (DivL src1 src2)); 7634 ins_cost(DEFAULT_COST*71); 7635 size(4); 7636 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7637 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7638 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7639 ins_pipe(divL_reg_reg); 7640 %} 7641 7642 // Register Long Division 7643 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7644 match(Set dst (DivL src1 src2)); 7645 ins_cost(DEFAULT_COST*71); 7646 size(4); 7647 format %{ "SDIVX $src1,$src2,$dst\t! 
long" %} 7648 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7649 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7650 ins_pipe(divL_reg_imm); 7651 %} 7652 7653 // Integer Remainder 7654 // Register Remainder 7655 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{ 7656 match(Set dst (ModI src1 src2)); 7657 effect( KILL ccr, KILL temp); 7658 7659 format %{ "SREM $src1,$src2,$dst" %} 7660 ins_encode( irem_reg(src1, src2, dst, temp) ); 7661 ins_pipe(sdiv_reg_reg); 7662 %} 7663 7664 // Immediate Remainder 7665 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{ 7666 match(Set dst (ModI src1 src2)); 7667 effect( KILL ccr, KILL temp); 7668 7669 format %{ "SREM $src1,$src2,$dst" %} 7670 ins_encode( irem_imm(src1, src2, dst, temp) ); 7671 ins_pipe(sdiv_reg_imm); 7672 %} 7673 7674 // Register Long Remainder 7675 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7676 effect(DEF dst, USE src1, USE src2); 7677 size(4); 7678 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7679 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7680 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7681 ins_pipe(divL_reg_reg); 7682 %} 7683 7684 // Register Long Division 7685 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7686 effect(DEF dst, USE src1, USE src2); 7687 size(4); 7688 format %{ "SDIVX $src1,$src2,$dst\t! long" %} 7689 opcode(Assembler::sdivx_op3, Assembler::arith_op); 7690 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7691 ins_pipe(divL_reg_imm); 7692 %} 7693 7694 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7695 effect(DEF dst, USE src1, USE src2); 7696 size(4); 7697 format %{ "MULX $src1,$src2,$dst\t! 
long" %} 7698 opcode(Assembler::mulx_op3, Assembler::arith_op); 7699 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7700 ins_pipe(mulL_reg_reg); 7701 %} 7702 7703 // Immediate Multiplication 7704 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{ 7705 effect(DEF dst, USE src1, USE src2); 7706 size(4); 7707 format %{ "MULX $src1,$src2,$dst" %} 7708 opcode(Assembler::mulx_op3, Assembler::arith_op); 7709 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 7710 ins_pipe(mulL_reg_imm); 7711 %} 7712 7713 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{ 7714 effect(DEF dst, USE src1, USE src2); 7715 size(4); 7716 format %{ "SUB $src1,$src2,$dst\t! long" %} 7717 opcode(Assembler::sub_op3, Assembler::arith_op); 7718 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7719 ins_pipe(ialu_reg_reg); 7720 %} 7721 7722 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{ 7723 effect(DEF dst, USE src1, USE src2); 7724 size(4); 7725 format %{ "SUB $src1,$src2,$dst\t! 
long" %} 7726 opcode(Assembler::sub_op3, Assembler::arith_op); 7727 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7728 ins_pipe(ialu_reg_reg); 7729 %} 7730 7731 // Register Long Remainder 7732 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 7733 match(Set dst (ModL src1 src2)); 7734 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7735 expand %{ 7736 iRegL tmp1; 7737 iRegL tmp2; 7738 divL_reg_reg_1(tmp1, src1, src2); 7739 mulL_reg_reg_1(tmp2, tmp1, src2); 7740 subL_reg_reg_1(dst, src1, tmp2); 7741 %} 7742 %} 7743 7744 // Register Long Remainder 7745 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{ 7746 match(Set dst (ModL src1 src2)); 7747 ins_cost(DEFAULT_COST*(71 + 6 + 1)); 7748 expand %{ 7749 iRegL tmp1; 7750 iRegL tmp2; 7751 divL_reg_imm13_1(tmp1, src1, src2); 7752 mulL_reg_imm13_1(tmp2, tmp1, src2); 7753 subL_reg_reg_2 (dst, src1, tmp2); 7754 %} 7755 %} 7756 7757 // Integer Shift Instructions 7758 // Register Shift Left 7759 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7760 match(Set dst (LShiftI src1 src2)); 7761 7762 size(4); 7763 format %{ "SLL $src1,$src2,$dst" %} 7764 opcode(Assembler::sll_op3, Assembler::arith_op); 7765 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7766 ins_pipe(ialu_reg_reg); 7767 %} 7768 7769 // Register Shift Left Immediate 7770 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7771 match(Set dst (LShiftI src1 src2)); 7772 7773 size(4); 7774 format %{ "SLL $src1,$src2,$dst" %} 7775 opcode(Assembler::sll_op3, Assembler::arith_op); 7776 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7777 ins_pipe(ialu_reg_imm); 7778 %} 7779 7780 // Register Shift Left 7781 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7782 match(Set dst (LShiftL src1 src2)); 7783 7784 size(4); 7785 format %{ "SLLX $src1,$src2,$dst" %} 7786 opcode(Assembler::sllx_op3, Assembler::arith_op); 7787 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7788 ins_pipe(ialu_reg_reg); 7789 %} 7790 7791 // 
Register Shift Left Immediate 7792 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7793 match(Set dst (LShiftL src1 src2)); 7794 7795 size(4); 7796 format %{ "SLLX $src1,$src2,$dst" %} 7797 opcode(Assembler::sllx_op3, Assembler::arith_op); 7798 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7799 ins_pipe(ialu_reg_imm); 7800 %} 7801 7802 // Register Arithmetic Shift Right 7803 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7804 match(Set dst (RShiftI src1 src2)); 7805 size(4); 7806 format %{ "SRA $src1,$src2,$dst" %} 7807 opcode(Assembler::sra_op3, Assembler::arith_op); 7808 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7809 ins_pipe(ialu_reg_reg); 7810 %} 7811 7812 // Register Arithmetic Shift Right Immediate 7813 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7814 match(Set dst (RShiftI src1 src2)); 7815 7816 size(4); 7817 format %{ "SRA $src1,$src2,$dst" %} 7818 opcode(Assembler::sra_op3, Assembler::arith_op); 7819 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7820 ins_pipe(ialu_reg_imm); 7821 %} 7822 7823 // Register Shift Right Arithmatic Long 7824 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7825 match(Set dst (RShiftL src1 src2)); 7826 7827 size(4); 7828 format %{ "SRAX $src1,$src2,$dst" %} 7829 opcode(Assembler::srax_op3, Assembler::arith_op); 7830 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7831 ins_pipe(ialu_reg_reg); 7832 %} 7833 7834 // Register Shift Left Immediate 7835 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7836 match(Set dst (RShiftL src1 src2)); 7837 7838 size(4); 7839 format %{ "SRAX $src1,$src2,$dst" %} 7840 opcode(Assembler::srax_op3, Assembler::arith_op); 7841 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7842 ins_pipe(ialu_reg_imm); 7843 %} 7844 7845 // Register Shift Right 7846 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 7847 match(Set dst (URShiftI src1 src2)); 7848 7849 size(4); 7850 format %{ "SRL 
$src1,$src2,$dst" %} 7851 opcode(Assembler::srl_op3, Assembler::arith_op); 7852 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 7853 ins_pipe(ialu_reg_reg); 7854 %} 7855 7856 // Register Shift Right Immediate 7857 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{ 7858 match(Set dst (URShiftI src1 src2)); 7859 7860 size(4); 7861 format %{ "SRL $src1,$src2,$dst" %} 7862 opcode(Assembler::srl_op3, Assembler::arith_op); 7863 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7864 ins_pipe(ialu_reg_imm); 7865 %} 7866 7867 // Register Shift Right 7868 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{ 7869 match(Set dst (URShiftL src1 src2)); 7870 7871 size(4); 7872 format %{ "SRLX $src1,$src2,$dst" %} 7873 opcode(Assembler::srlx_op3, Assembler::arith_op); 7874 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) ); 7875 ins_pipe(ialu_reg_reg); 7876 %} 7877 7878 // Register Shift Right Immediate 7879 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{ 7880 match(Set dst (URShiftL src1 src2)); 7881 7882 size(4); 7883 format %{ "SRLX $src1,$src2,$dst" %} 7884 opcode(Assembler::srlx_op3, Assembler::arith_op); 7885 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7886 ins_pipe(ialu_reg_imm); 7887 %} 7888 7889 // Register Shift Right Immediate with a CastP2X 7890 #ifdef _LP64 7891 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{ 7892 match(Set dst (URShiftL (CastP2X src1) src2)); 7893 size(4); 7894 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %} 7895 opcode(Assembler::srlx_op3, Assembler::arith_op); 7896 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) ); 7897 ins_pipe(ialu_reg_imm); 7898 %} 7899 #else 7900 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{ 7901 match(Set dst (URShiftI (CastP2X src1) src2)); 7902 size(4); 7903 format %{ "SRL $src1,$src2,$dst\t! 
Cast ptr $src1 to int and shift" %} 7904 opcode(Assembler::srl_op3, Assembler::arith_op); 7905 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) ); 7906 ins_pipe(ialu_reg_imm); 7907 %} 7908 #endif 7909 7910 7911 //----------Floating Point Arithmetic Instructions----------------------------- 7912 7913 // Add float single precision 7914 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ 7915 match(Set dst (AddF src1 src2)); 7916 7917 size(4); 7918 format %{ "FADDS $src1,$src2,$dst" %} 7919 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf); 7920 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7921 ins_pipe(faddF_reg_reg); 7922 %} 7923 7924 // Add float double precision 7925 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ 7926 match(Set dst (AddD src1 src2)); 7927 7928 size(4); 7929 format %{ "FADDD $src1,$src2,$dst" %} 7930 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 7931 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7932 ins_pipe(faddD_reg_reg); 7933 %} 7934 7935 // Sub float single precision 7936 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ 7937 match(Set dst (SubF src1 src2)); 7938 7939 size(4); 7940 format %{ "FSUBS $src1,$src2,$dst" %} 7941 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf); 7942 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7943 ins_pipe(faddF_reg_reg); 7944 %} 7945 7946 // Sub float double precision 7947 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ 7948 match(Set dst (SubD src1 src2)); 7949 7950 size(4); 7951 format %{ "FSUBD $src1,$src2,$dst" %} 7952 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 7953 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7954 ins_pipe(faddD_reg_reg); 7955 %} 7956 7957 // Mul float single precision 7958 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ 7959 match(Set dst (MulF src1 src2)); 7960 7961 size(4); 7962 format %{ "FMULS $src1,$src2,$dst" 
%} 7963 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf); 7964 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7965 ins_pipe(fmulF_reg_reg); 7966 %} 7967 7968 // Mul float double precision 7969 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ 7970 match(Set dst (MulD src1 src2)); 7971 7972 size(4); 7973 format %{ "FMULD $src1,$src2,$dst" %} 7974 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 7975 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7976 ins_pipe(fmulD_reg_reg); 7977 %} 7978 7979 // Div float single precision 7980 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ 7981 match(Set dst (DivF src1 src2)); 7982 7983 size(4); 7984 format %{ "FDIVS $src1,$src2,$dst" %} 7985 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf); 7986 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst)); 7987 ins_pipe(fdivF_reg_reg); 7988 %} 7989 7990 // Div float double precision 7991 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ 7992 match(Set dst (DivD src1 src2)); 7993 7994 size(4); 7995 format %{ "FDIVD $src1,$src2,$dst" %} 7996 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf); 7997 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 7998 ins_pipe(fdivD_reg_reg); 7999 %} 8000 8001 // Absolute float double precision 8002 instruct absD_reg(regD dst, regD src) %{ 8003 match(Set dst (AbsD src)); 8004 8005 format %{ "FABSd $src,$dst" %} 8006 ins_encode(fabsd(dst, src)); 8007 ins_pipe(faddD_reg); 8008 %} 8009 8010 // Absolute float single precision 8011 instruct absF_reg(regF dst, regF src) %{ 8012 match(Set dst (AbsF src)); 8013 8014 format %{ "FABSs $src,$dst" %} 8015 ins_encode(fabss(dst, src)); 8016 ins_pipe(faddF_reg); 8017 %} 8018 8019 instruct negF_reg(regF dst, regF src) %{ 8020 match(Set dst (NegF src)); 8021 8022 size(4); 8023 format %{ "FNEGs $src,$dst" %} 8024 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf); 8025 
ins_encode(form3_opf_rs2F_rdF(src, dst)); 8026 ins_pipe(faddF_reg); 8027 %} 8028 8029 instruct negD_reg(regD dst, regD src) %{ 8030 match(Set dst (NegD src)); 8031 8032 format %{ "FNEGd $src,$dst" %} 8033 ins_encode(fnegd(dst, src)); 8034 ins_pipe(faddD_reg); 8035 %} 8036 8037 // Sqrt float double precision 8038 instruct sqrtF_reg_reg(regF dst, regF src) %{ 8039 match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); 8040 8041 size(4); 8042 format %{ "FSQRTS $src,$dst" %} 8043 ins_encode(fsqrts(dst, src)); 8044 ins_pipe(fdivF_reg_reg); 8045 %} 8046 8047 // Sqrt float double precision 8048 instruct sqrtD_reg_reg(regD dst, regD src) %{ 8049 match(Set dst (SqrtD src)); 8050 8051 size(4); 8052 format %{ "FSQRTD $src,$dst" %} 8053 ins_encode(fsqrtd(dst, src)); 8054 ins_pipe(fdivD_reg_reg); 8055 %} 8056 8057 //----------Logical Instructions----------------------------------------------- 8058 // And Instructions 8059 // Register And 8060 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8061 match(Set dst (AndI src1 src2)); 8062 8063 size(4); 8064 format %{ "AND $src1,$src2,$dst" %} 8065 opcode(Assembler::and_op3, Assembler::arith_op); 8066 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8067 ins_pipe(ialu_reg_reg); 8068 %} 8069 8070 // Immediate And 8071 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8072 match(Set dst (AndI src1 src2)); 8073 8074 size(4); 8075 format %{ "AND $src1,$src2,$dst" %} 8076 opcode(Assembler::and_op3, Assembler::arith_op); 8077 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8078 ins_pipe(ialu_reg_imm); 8079 %} 8080 8081 // Register And Long 8082 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8083 match(Set dst (AndL src1 src2)); 8084 8085 ins_cost(DEFAULT_COST); 8086 size(4); 8087 format %{ "AND $src1,$src2,$dst\t! 
long" %} 8088 opcode(Assembler::and_op3, Assembler::arith_op); 8089 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8090 ins_pipe(ialu_reg_reg); 8091 %} 8092 8093 instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8094 match(Set dst (AndL src1 con)); 8095 8096 ins_cost(DEFAULT_COST); 8097 size(4); 8098 format %{ "AND $src1,$con,$dst\t! long" %} 8099 opcode(Assembler::and_op3, Assembler::arith_op); 8100 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8101 ins_pipe(ialu_reg_imm); 8102 %} 8103 8104 // Or Instructions 8105 // Register Or 8106 instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8107 match(Set dst (OrI src1 src2)); 8108 8109 size(4); 8110 format %{ "OR $src1,$src2,$dst" %} 8111 opcode(Assembler::or_op3, Assembler::arith_op); 8112 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8113 ins_pipe(ialu_reg_reg); 8114 %} 8115 8116 // Immediate Or 8117 instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8118 match(Set dst (OrI src1 src2)); 8119 8120 size(4); 8121 format %{ "OR $src1,$src2,$dst" %} 8122 opcode(Assembler::or_op3, Assembler::arith_op); 8123 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8124 ins_pipe(ialu_reg_imm); 8125 %} 8126 8127 // Register Or Long 8128 instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8129 match(Set dst (OrL src1 src2)); 8130 8131 ins_cost(DEFAULT_COST); 8132 size(4); 8133 format %{ "OR $src1,$src2,$dst\t! long" %} 8134 opcode(Assembler::or_op3, Assembler::arith_op); 8135 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8136 ins_pipe(ialu_reg_reg); 8137 %} 8138 8139 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8140 match(Set dst (OrL src1 con)); 8141 ins_cost(DEFAULT_COST*2); 8142 8143 ins_cost(DEFAULT_COST); 8144 size(4); 8145 format %{ "OR $src1,$con,$dst\t! 
long" %} 8146 opcode(Assembler::or_op3, Assembler::arith_op); 8147 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8148 ins_pipe(ialu_reg_imm); 8149 %} 8150 8151 #ifndef _LP64 8152 8153 // Use sp_ptr_RegP to match G2 (TLS register) without spilling. 8154 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{ 8155 match(Set dst (OrI src1 (CastP2X src2))); 8156 8157 size(4); 8158 format %{ "OR $src1,$src2,$dst" %} 8159 opcode(Assembler::or_op3, Assembler::arith_op); 8160 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8161 ins_pipe(ialu_reg_reg); 8162 %} 8163 8164 #else 8165 8166 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{ 8167 match(Set dst (OrL src1 (CastP2X src2))); 8168 8169 ins_cost(DEFAULT_COST); 8170 size(4); 8171 format %{ "OR $src1,$src2,$dst\t! long" %} 8172 opcode(Assembler::or_op3, Assembler::arith_op); 8173 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8174 ins_pipe(ialu_reg_reg); 8175 %} 8176 8177 #endif 8178 8179 // Xor Instructions 8180 // Register Xor 8181 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{ 8182 match(Set dst (XorI src1 src2)); 8183 8184 size(4); 8185 format %{ "XOR $src1,$src2,$dst" %} 8186 opcode(Assembler::xor_op3, Assembler::arith_op); 8187 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8188 ins_pipe(ialu_reg_reg); 8189 %} 8190 8191 // Immediate Xor 8192 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{ 8193 match(Set dst (XorI src1 src2)); 8194 8195 size(4); 8196 format %{ "XOR $src1,$src2,$dst" %} 8197 opcode(Assembler::xor_op3, Assembler::arith_op); 8198 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) ); 8199 ins_pipe(ialu_reg_imm); 8200 %} 8201 8202 // Register Xor Long 8203 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{ 8204 match(Set dst (XorL src1 src2)); 8205 8206 ins_cost(DEFAULT_COST); 8207 size(4); 8208 format %{ "XOR $src1,$src2,$dst\t! 
long" %} 8209 opcode(Assembler::xor_op3, Assembler::arith_op); 8210 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) ); 8211 ins_pipe(ialu_reg_reg); 8212 %} 8213 8214 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{ 8215 match(Set dst (XorL src1 con)); 8216 8217 ins_cost(DEFAULT_COST); 8218 size(4); 8219 format %{ "XOR $src1,$con,$dst\t! long" %} 8220 opcode(Assembler::xor_op3, Assembler::arith_op); 8221 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) ); 8222 ins_pipe(ialu_reg_imm); 8223 %} 8224 8225 //----------Convert to Boolean------------------------------------------------- 8226 // Nice hack for 32-bit tests but doesn't work for 8227 // 64-bit pointers. 8228 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{ 8229 match(Set dst (Conv2B src)); 8230 effect( KILL ccr ); 8231 ins_cost(DEFAULT_COST*2); 8232 format %{ "CMP R_G0,$src\n\t" 8233 "ADDX R_G0,0,$dst" %} 8234 ins_encode( enc_to_bool( src, dst ) ); 8235 ins_pipe(ialu_reg_ialu); 8236 %} 8237 8238 #ifndef _LP64 8239 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{ 8240 match(Set dst (Conv2B src)); 8241 effect( KILL ccr ); 8242 ins_cost(DEFAULT_COST*2); 8243 format %{ "CMP R_G0,$src\n\t" 8244 "ADDX R_G0,0,$dst" %} 8245 ins_encode( enc_to_bool( src, dst ) ); 8246 ins_pipe(ialu_reg_ialu); 8247 %} 8248 #else 8249 instruct convP2B( iRegI dst, iRegP src ) %{ 8250 match(Set dst (Conv2B src)); 8251 ins_cost(DEFAULT_COST*2); 8252 format %{ "MOV $src,$dst\n\t" 8253 "MOVRNZ $src,1,$dst" %} 8254 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) ); 8255 ins_pipe(ialu_clr_and_mover); 8256 %} 8257 #endif 8258 8259 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{ 8260 match(Set dst (CmpLTMask src zero)); 8261 effect(KILL ccr); 8262 size(4); 8263 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %} 8264 ins_encode %{ 8265 __ sra($src$$Register, 31, $dst$$Register); 8266 %} 8267 ins_pipe(ialu_reg_imm); 8268 %} 8269 8270 instruct cmpLTMask_reg_reg( iRegI dst, 
iRegI p, iRegI q, flagsReg ccr ) %{ 8271 match(Set dst (CmpLTMask p q)); 8272 effect( KILL ccr ); 8273 ins_cost(DEFAULT_COST*4); 8274 format %{ "CMP $p,$q\n\t" 8275 "MOV #0,$dst\n\t" 8276 "BLT,a .+8\n\t" 8277 "MOV #-1,$dst" %} 8278 ins_encode( enc_ltmask(p,q,dst) ); 8279 ins_pipe(ialu_reg_reg_ialu); 8280 %} 8281 8282 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{ 8283 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); 8284 effect(KILL ccr, TEMP tmp); 8285 ins_cost(DEFAULT_COST*3); 8286 8287 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t" 8288 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t" 8289 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %} 8290 ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp)); 8291 ins_pipe(cadd_cmpltmask); 8292 %} 8293 8294 instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{ 8295 match(Set p (AndI (CmpLTMask p q) y)); 8296 effect(KILL ccr); 8297 ins_cost(DEFAULT_COST*3); 8298 8299 format %{ "CMP $p,$q\n\t" 8300 "MOV $y,$p\n\t" 8301 "MOVge G0,$p" %} 8302 ins_encode %{ 8303 __ cmp($p$$Register, $q$$Register); 8304 __ mov($y$$Register, $p$$Register); 8305 __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register); 8306 %} 8307 ins_pipe(ialu_reg_reg_ialu); 8308 %} 8309 8310 //----------------------------------------------------------------- 8311 // Direct raw moves between float and general registers using VIS3. 8312 8313 // ins_pipe(faddF_reg); 8314 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{ 8315 predicate(UseVIS >= 3); 8316 match(Set dst (MoveF2I src)); 8317 8318 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %} 8319 ins_encode %{ 8320 __ movstouw($src$$FloatRegister, $dst$$Register); 8321 %} 8322 ins_pipe(ialu_reg_reg); 8323 %} 8324 8325 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{ 8326 predicate(UseVIS >= 3); 8327 match(Set dst (MoveI2F src)); 8328 8329 format %{ "MOVWTOS $src,$dst\t! 
MoveI2F" %} 8330 ins_encode %{ 8331 __ movwtos($src$$Register, $dst$$FloatRegister); 8332 %} 8333 ins_pipe(ialu_reg_reg); 8334 %} 8335 8336 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{ 8337 predicate(UseVIS >= 3); 8338 match(Set dst (MoveD2L src)); 8339 8340 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %} 8341 ins_encode %{ 8342 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register); 8343 %} 8344 ins_pipe(ialu_reg_reg); 8345 %} 8346 8347 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{ 8348 predicate(UseVIS >= 3); 8349 match(Set dst (MoveL2D src)); 8350 8351 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %} 8352 ins_encode %{ 8353 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg)); 8354 %} 8355 ins_pipe(ialu_reg_reg); 8356 %} 8357 8358 8359 // Raw moves between float and general registers using stack. 8360 8361 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{ 8362 match(Set dst (MoveF2I src)); 8363 effect(DEF dst, USE src); 8364 ins_cost(MEMORY_REF_COST); 8365 8366 size(4); 8367 format %{ "LDUW $src,$dst\t! MoveF2I" %} 8368 opcode(Assembler::lduw_op3); 8369 ins_encode(simple_form3_mem_reg( src, dst ) ); 8370 ins_pipe(iload_mem); 8371 %} 8372 8373 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{ 8374 match(Set dst (MoveI2F src)); 8375 effect(DEF dst, USE src); 8376 ins_cost(MEMORY_REF_COST); 8377 8378 size(4); 8379 format %{ "LDF $src,$dst\t! MoveI2F" %} 8380 opcode(Assembler::ldf_op3); 8381 ins_encode(simple_form3_mem_reg(src, dst)); 8382 ins_pipe(floadF_stk); 8383 %} 8384 8385 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{ 8386 match(Set dst (MoveD2L src)); 8387 effect(DEF dst, USE src); 8388 ins_cost(MEMORY_REF_COST); 8389 8390 size(4); 8391 format %{ "LDX $src,$dst\t! 
MoveD2L" %} 8392 opcode(Assembler::ldx_op3); 8393 ins_encode(simple_form3_mem_reg( src, dst ) ); 8394 ins_pipe(iload_mem); 8395 %} 8396 8397 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{ 8398 match(Set dst (MoveL2D src)); 8399 effect(DEF dst, USE src); 8400 ins_cost(MEMORY_REF_COST); 8401 8402 size(4); 8403 format %{ "LDDF $src,$dst\t! MoveL2D" %} 8404 opcode(Assembler::lddf_op3); 8405 ins_encode(simple_form3_mem_reg(src, dst)); 8406 ins_pipe(floadD_stk); 8407 %} 8408 8409 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{ 8410 match(Set dst (MoveF2I src)); 8411 effect(DEF dst, USE src); 8412 ins_cost(MEMORY_REF_COST); 8413 8414 size(4); 8415 format %{ "STF $src,$dst\t! MoveF2I" %} 8416 opcode(Assembler::stf_op3); 8417 ins_encode(simple_form3_mem_reg(dst, src)); 8418 ins_pipe(fstoreF_stk_reg); 8419 %} 8420 8421 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{ 8422 match(Set dst (MoveI2F src)); 8423 effect(DEF dst, USE src); 8424 ins_cost(MEMORY_REF_COST); 8425 8426 size(4); 8427 format %{ "STW $src,$dst\t! MoveI2F" %} 8428 opcode(Assembler::stw_op3); 8429 ins_encode(simple_form3_mem_reg( dst, src ) ); 8430 ins_pipe(istore_mem_reg); 8431 %} 8432 8433 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{ 8434 match(Set dst (MoveD2L src)); 8435 effect(DEF dst, USE src); 8436 ins_cost(MEMORY_REF_COST); 8437 8438 size(4); 8439 format %{ "STDF $src,$dst\t! MoveD2L" %} 8440 opcode(Assembler::stdf_op3); 8441 ins_encode(simple_form3_mem_reg(dst, src)); 8442 ins_pipe(fstoreD_stk_reg); 8443 %} 8444 8445 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{ 8446 match(Set dst (MoveL2D src)); 8447 effect(DEF dst, USE src); 8448 ins_cost(MEMORY_REF_COST); 8449 8450 size(4); 8451 format %{ "STX $src,$dst\t! 
MoveL2D" %} 8452 opcode(Assembler::stx_op3); 8453 ins_encode(simple_form3_mem_reg( dst, src ) ); 8454 ins_pipe(istore_mem_reg); 8455 %} 8456 8457 8458 //----------Arithmetic Conversion Instructions--------------------------------- 8459 // The conversions operations are all Alpha sorted. Please keep it that way! 8460 8461 instruct convD2F_reg(regF dst, regD src) %{ 8462 match(Set dst (ConvD2F src)); 8463 size(4); 8464 format %{ "FDTOS $src,$dst" %} 8465 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf); 8466 ins_encode(form3_opf_rs2D_rdF(src, dst)); 8467 ins_pipe(fcvtD2F); 8468 %} 8469 8470 8471 // Convert a double to an int in a float register. 8472 // If the double is a NAN, stuff a zero in instead. 8473 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{ 8474 effect(DEF dst, USE src, KILL fcc0); 8475 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t" 8476 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t" 8477 "FDTOI $src,$dst\t! convert in delay slot\n\t" 8478 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t" 8479 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n" 8480 "skip:" %} 8481 ins_encode(form_d2i_helper(src,dst)); 8482 ins_pipe(fcvtD2I); 8483 %} 8484 8485 instruct convD2I_stk(stackSlotI dst, regD src) %{ 8486 match(Set dst (ConvD2I src)); 8487 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST); 8488 expand %{ 8489 regF tmp; 8490 convD2I_helper(tmp, src); 8491 regF_to_stkI(dst, tmp); 8492 %} 8493 %} 8494 8495 instruct convD2I_reg(iRegI dst, regD src) %{ 8496 predicate(UseVIS >= 3); 8497 match(Set dst (ConvD2I src)); 8498 ins_cost(DEFAULT_COST*2 + BRANCH_COST); 8499 expand %{ 8500 regF tmp; 8501 convD2I_helper(tmp, src); 8502 MoveF2I_reg_reg(dst, tmp); 8503 %} 8504 %} 8505 8506 8507 // Convert a double to a long in a double register. 8508 // If the double is a NAN, stuff a zero in instead. 
instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FDTOX $src,$dst\t! convert in delay slot\n\t"
            "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
            "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_d2l_helper(src,dst));
  ins_pipe(fcvtD2L);
%}

instruct convD2L_stk(stackSlotL dst, regD src) %{
  match(Set dst (ConvD2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convD2L_helper(tmp, src);
    regD_to_stkL(dst, tmp);
  %}
%}

instruct convD2L_reg(iRegL dst, regD src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvD2L src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convD2L_helper(tmp, src);
    MoveD2L_reg_reg(dst, tmp);
  %}
%}


instruct convF2D_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "FSTOD $src,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
  ins_encode(form3_opf_rs2F_rdD(src, dst));
  ins_pipe(fcvtF2D);
%}


// Convert a float to an int in a float register.
// If the float is a NAN, stuff a zero in instead.
instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FSTOI $src,$dst\t! convert in delay slot\n\t"
            "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
            "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_f2i_helper(src,dst));
  ins_pipe(fcvtF2I);
%}

instruct convF2I_stk(stackSlotI dst, regF src) %{
  match(Set dst (ConvF2I src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regF tmp;
    convF2I_helper(tmp, src);
    regF_to_stkI(dst, tmp);
  %}
%}

instruct convF2I_reg(iRegI dst, regF src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvF2I src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regF tmp;
    convF2I_helper(tmp, src);
    MoveF2I_reg_reg(dst, tmp);
  %}
%}


// Convert a float to a long in a double register (dst is regD).
// If the float is a NAN, stuff a zero in instead.
instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
  effect(DEF dst, USE src, KILL fcc0);
  format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
            "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
            "FSTOX $src,$dst\t! convert in delay slot\n\t"
            "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
            "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
            "skip:" %}
  ins_encode(form_f2l_helper(src,dst));
  ins_pipe(fcvtF2L);
%}

instruct convF2L_stk(stackSlotL dst, regF src) %{
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convF2L_helper(tmp, src);
    regD_to_stkL(dst, tmp);
  %}
%}

instruct convF2L_reg(iRegL dst, regF src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST*2 + BRANCH_COST);
  expand %{
    regD tmp;
    convF2L_helper(tmp, src);
    MoveD2L_reg_reg(dst, tmp);
  %}
%}


instruct convI2D_helper(regD dst, regF tmp) %{
  effect(USE tmp, DEF dst);
  format %{ "FITOD $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
  ins_encode(form3_opf_rs2F_rdD(tmp, dst));
  ins_pipe(fcvtI2D);
%}

instruct convI2D_stk(stackSlotI src, regD dst) %{
  match(Set dst (ConvI2D src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regF tmp;
    stkI_to_regF(tmp, src);
    convI2D_helper(dst, tmp);
  %}
%}

instruct convI2D_reg(regD_low dst, iRegI src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvI2D src));
  expand %{
    regF tmp;
    MoveI2F_reg_reg(tmp, src);
    convI2D_helper(dst, tmp);
  %}
%}

instruct convI2D_mem(regD_low dst, memory mem) %{
  match(Set dst (ConvI2D (LoadI mem)));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(8);
  format %{ "LDF $mem,$dst\n\t"
            "FITOD $dst,$dst" %}
  opcode(Assembler::ldf_op3, Assembler::fitod_opf);
  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
  ins_pipe(floadF_mem);
%}


instruct convI2F_helper(regF dst, regF tmp) %{
  effect(DEF dst, USE tmp);
  format %{ "FITOS $tmp,$dst" %}
  opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
  ins_encode(form3_opf_rs2F_rdF(tmp, dst));
  ins_pipe(fcvtI2F);
%}

instruct convI2F_stk(regF dst, stackSlotI src) %{
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  expand %{
    regF tmp;
    stkI_to_regF(tmp,src);
    convI2F_helper(dst, tmp);
  %}
%}

instruct convI2F_reg(regF dst, iRegI src) %{
  predicate(UseVIS >= 3);
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST);
  expand %{
    regF tmp;
    MoveI2F_reg_reg(tmp, src);
    convI2F_helper(dst, tmp);
  %}
%}

instruct convI2F_mem( regF dst, memory mem ) %{
  match(Set dst (ConvI2F (LoadI mem)));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
  size(8);
  format %{ "LDF $mem,$dst\n\t"
            "FITOS $dst,$dst" %}
  opcode(Assembler::ldf_op3, Assembler::fitos_opf);
  ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
  ins_pipe(floadF_mem);
%}


instruct convI2L_reg(iRegL dst, iRegI src) %{
  match(Set dst (ConvI2L src));
  size(4);
  format %{ "SRA $src,0,$dst\t! int->long" %}
  opcode(Assembler::sra_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend convert int to long
instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
  match(Set dst (AndL (ConvI2L src) mask) );
  size(4);
  format %{ "SRL $src,0,$dst\t! zero-extend int to long" %}
  opcode(Assembler::srl_op3, Assembler::arith_op);
  ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend long
instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
  match(Set dst (AndL src mask) );
  size(4);
  format %{ "SRL $src,0,$dst\t! 
zero-extend long" %} 8729 opcode(Assembler::srl_op3, Assembler::arith_op); 8730 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) ); 8731 ins_pipe(ialu_reg_reg); 8732 %} 8733 8734 8735 //----------- 8736 // Long to Double conversion using V8 opcodes. 8737 // Still useful because cheetah traps and becomes 8738 // amazingly slow for some common numbers. 8739 8740 // Magic constant, 0x43300000 8741 instruct loadConI_x43300000(iRegI dst) %{ 8742 effect(DEF dst); 8743 size(4); 8744 format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %} 8745 ins_encode(SetHi22(0x43300000, dst)); 8746 ins_pipe(ialu_none); 8747 %} 8748 8749 // Magic constant, 0x41f00000 8750 instruct loadConI_x41f00000(iRegI dst) %{ 8751 effect(DEF dst); 8752 size(4); 8753 format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %} 8754 ins_encode(SetHi22(0x41f00000, dst)); 8755 ins_pipe(ialu_none); 8756 %} 8757 8758 // Construct a double from two float halves 8759 instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{ 8760 effect(DEF dst, USE src1, USE src2); 8761 size(8); 8762 format %{ "FMOVS $src1.hi,$dst.hi\n\t" 8763 "FMOVS $src2.lo,$dst.lo" %} 8764 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf); 8765 ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst)); 8766 ins_pipe(faddD_reg_reg); 8767 %} 8768 8769 // Convert integer in high half of a double register (in the lower half of 8770 // the double register file) to double 8771 instruct convI2D_regDHi_regD(regD dst, regD_low src) %{ 8772 effect(DEF dst, USE src); 8773 size(4); 8774 format %{ "FITOD $src,$dst" %} 8775 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf); 8776 ins_encode(form3_opf_rs2D_rdD(src, dst)); 8777 ins_pipe(fcvtLHi2D); 8778 %} 8779 8780 // Add float double precision 8781 instruct addD_regD_regD(regD dst, regD src1, regD src2) %{ 8782 effect(DEF dst, USE src1, USE src2); 8783 size(4); 8784 format %{ "FADDD $src1,$src2,$dst" %} 8785 
opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf); 8786 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8787 ins_pipe(faddD_reg_reg); 8788 %} 8789 8790 // Sub float double precision 8791 instruct subD_regD_regD(regD dst, regD src1, regD src2) %{ 8792 effect(DEF dst, USE src1, USE src2); 8793 size(4); 8794 format %{ "FSUBD $src1,$src2,$dst" %} 8795 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf); 8796 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8797 ins_pipe(faddD_reg_reg); 8798 %} 8799 8800 // Mul float double precision 8801 instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{ 8802 effect(DEF dst, USE src1, USE src2); 8803 size(4); 8804 format %{ "FMULD $src1,$src2,$dst" %} 8805 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf); 8806 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst)); 8807 ins_pipe(fmulD_reg_reg); 8808 %} 8809 8810 instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{ 8811 match(Set dst (ConvL2D src)); 8812 ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6); 8813 8814 expand %{ 8815 regD_low tmpsrc; 8816 iRegI ix43300000; 8817 iRegI ix41f00000; 8818 stackSlotL lx43300000; 8819 stackSlotL lx41f00000; 8820 regD_low dx43300000; 8821 regD dx41f00000; 8822 regD tmp1; 8823 regD_low tmp2; 8824 regD tmp3; 8825 regD tmp4; 8826 8827 stkL_to_regD(tmpsrc, src); 8828 8829 loadConI_x43300000(ix43300000); 8830 loadConI_x41f00000(ix41f00000); 8831 regI_to_stkLHi(lx43300000, ix43300000); 8832 regI_to_stkLHi(lx41f00000, ix41f00000); 8833 stkL_to_regD(dx43300000, lx43300000); 8834 stkL_to_regD(dx41f00000, lx41f00000); 8835 8836 convI2D_regDHi_regD(tmp1, tmpsrc); 8837 regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc); 8838 subD_regD_regD(tmp3, tmp2, dx43300000); 8839 mulD_regD_regD(tmp4, tmp1, dx41f00000); 8840 addD_regD_regD(dst, tmp3, tmp4); 8841 %} 8842 %} 8843 8844 // Long to Double conversion using fast fxtof 8845 instruct convL2D_helper(regD dst, regD tmp) %{ 8846 
effect(DEF dst, USE tmp); 8847 size(4); 8848 format %{ "FXTOD $tmp,$dst" %} 8849 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf); 8850 ins_encode(form3_opf_rs2D_rdD(tmp, dst)); 8851 ins_pipe(fcvtL2D); 8852 %} 8853 8854 instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{ 8855 predicate(VM_Version::has_fast_fxtof()); 8856 match(Set dst (ConvL2D src)); 8857 ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST); 8858 expand %{ 8859 regD tmp; 8860 stkL_to_regD(tmp, src); 8861 convL2D_helper(dst, tmp); 8862 %} 8863 %} 8864 8865 instruct convL2D_reg(regD dst, iRegL src) %{ 8866 predicate(UseVIS >= 3); 8867 match(Set dst (ConvL2D src)); 8868 expand %{ 8869 regD tmp; 8870 MoveL2D_reg_reg(tmp, src); 8871 convL2D_helper(dst, tmp); 8872 %} 8873 %} 8874 8875 // Long to Float conversion using fast fxtof 8876 instruct convL2F_helper(regF dst, regD tmp) %{ 8877 effect(DEF dst, USE tmp); 8878 size(4); 8879 format %{ "FXTOS $tmp,$dst" %} 8880 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf); 8881 ins_encode(form3_opf_rs2D_rdF(tmp, dst)); 8882 ins_pipe(fcvtL2F); 8883 %} 8884 8885 instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{ 8886 match(Set dst (ConvL2F src)); 8887 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 8888 expand %{ 8889 regD tmp; 8890 stkL_to_regD(tmp, src); 8891 convL2F_helper(dst, tmp); 8892 %} 8893 %} 8894 8895 instruct convL2F_reg(regF dst, iRegL src) %{ 8896 predicate(UseVIS >= 3); 8897 match(Set dst (ConvL2F src)); 8898 ins_cost(DEFAULT_COST); 8899 expand %{ 8900 regD tmp; 8901 MoveL2D_reg_reg(tmp, src); 8902 convL2F_helper(dst, tmp); 8903 %} 8904 %} 8905 8906 //----------- 8907 8908 instruct convL2I_reg(iRegI dst, iRegL src) %{ 8909 match(Set dst (ConvL2I src)); 8910 #ifndef _LP64 8911 format %{ "MOV $src.lo,$dst\t! long->int" %} 8912 ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) ); 8913 ins_pipe(ialu_move_reg_I_to_L); 8914 #else 8915 size(4); 8916 format %{ "SRA $src,R_G0,$dst\t! 
long->int" %} 8917 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) ); 8918 ins_pipe(ialu_reg); 8919 #endif 8920 %} 8921 8922 // Register Shift Right Immediate 8923 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{ 8924 match(Set dst (ConvL2I (RShiftL src cnt))); 8925 8926 size(4); 8927 format %{ "SRAX $src,$cnt,$dst" %} 8928 opcode(Assembler::srax_op3, Assembler::arith_op); 8929 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) ); 8930 ins_pipe(ialu_reg_imm); 8931 %} 8932 8933 //----------Control Flow Instructions------------------------------------------ 8934 // Compare Instructions 8935 // Compare Integers 8936 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{ 8937 match(Set icc (CmpI op1 op2)); 8938 effect( DEF icc, USE op1, USE op2 ); 8939 8940 size(4); 8941 format %{ "CMP $op1,$op2" %} 8942 opcode(Assembler::subcc_op3, Assembler::arith_op); 8943 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8944 ins_pipe(ialu_cconly_reg_reg); 8945 %} 8946 8947 instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{ 8948 match(Set icc (CmpU op1 op2)); 8949 8950 size(4); 8951 format %{ "CMP $op1,$op2\t! 
unsigned" %} 8952 opcode(Assembler::subcc_op3, Assembler::arith_op); 8953 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8954 ins_pipe(ialu_cconly_reg_reg); 8955 %} 8956 8957 instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{ 8958 match(Set icc (CmpI op1 op2)); 8959 effect( DEF icc, USE op1 ); 8960 8961 size(4); 8962 format %{ "CMP $op1,$op2" %} 8963 opcode(Assembler::subcc_op3, Assembler::arith_op); 8964 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8965 ins_pipe(ialu_cconly_reg_imm); 8966 %} 8967 8968 instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{ 8969 match(Set icc (CmpI (AndI op1 op2) zero)); 8970 8971 size(4); 8972 format %{ "BTST $op2,$op1" %} 8973 opcode(Assembler::andcc_op3, Assembler::arith_op); 8974 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8975 ins_pipe(ialu_cconly_reg_reg_zero); 8976 %} 8977 8978 instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{ 8979 match(Set icc (CmpI (AndI op1 op2) zero)); 8980 8981 size(4); 8982 format %{ "BTST $op2,$op1" %} 8983 opcode(Assembler::andcc_op3, Assembler::arith_op); 8984 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 8985 ins_pipe(ialu_cconly_reg_imm_zero); 8986 %} 8987 8988 instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{ 8989 match(Set xcc (CmpL op1 op2)); 8990 effect( DEF xcc, USE op1, USE op2 ); 8991 8992 size(4); 8993 format %{ "CMP $op1,$op2\t\t! long" %} 8994 opcode(Assembler::subcc_op3, Assembler::arith_op); 8995 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 8996 ins_pipe(ialu_cconly_reg_reg); 8997 %} 8998 8999 instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{ 9000 match(Set xcc (CmpL op1 con)); 9001 effect( DEF xcc, USE op1, USE con ); 9002 9003 size(4); 9004 format %{ "CMP $op1,$con\t\t! 
long" %} 9005 opcode(Assembler::subcc_op3, Assembler::arith_op); 9006 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 9007 ins_pipe(ialu_cconly_reg_reg); 9008 %} 9009 9010 instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{ 9011 match(Set xcc (CmpL (AndL op1 op2) zero)); 9012 effect( DEF xcc, USE op1, USE op2 ); 9013 9014 size(4); 9015 format %{ "BTST $op1,$op2\t\t! long" %} 9016 opcode(Assembler::andcc_op3, Assembler::arith_op); 9017 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9018 ins_pipe(ialu_cconly_reg_reg); 9019 %} 9020 9021 // useful for checking the alignment of a pointer: 9022 instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{ 9023 match(Set xcc (CmpL (AndL op1 con) zero)); 9024 effect( DEF xcc, USE op1, USE con ); 9025 9026 size(4); 9027 format %{ "BTST $op1,$con\t\t! long" %} 9028 opcode(Assembler::andcc_op3, Assembler::arith_op); 9029 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) ); 9030 ins_pipe(ialu_cconly_reg_reg); 9031 %} 9032 9033 instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU12 op2 ) %{ 9034 match(Set icc (CmpU op1 op2)); 9035 9036 size(4); 9037 format %{ "CMP $op1,$op2\t! unsigned" %} 9038 opcode(Assembler::subcc_op3, Assembler::arith_op); 9039 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9040 ins_pipe(ialu_cconly_reg_imm); 9041 %} 9042 9043 // Compare Pointers 9044 instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{ 9045 match(Set pcc (CmpP op1 op2)); 9046 9047 size(4); 9048 format %{ "CMP $op1,$op2\t! ptr" %} 9049 opcode(Assembler::subcc_op3, Assembler::arith_op); 9050 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9051 ins_pipe(ialu_cconly_reg_reg); 9052 %} 9053 9054 instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{ 9055 match(Set pcc (CmpP op1 op2)); 9056 9057 size(4); 9058 format %{ "CMP $op1,$op2\t! 
ptr" %} 9059 opcode(Assembler::subcc_op3, Assembler::arith_op); 9060 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9061 ins_pipe(ialu_cconly_reg_imm); 9062 %} 9063 9064 // Compare Narrow oops 9065 instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{ 9066 match(Set icc (CmpN op1 op2)); 9067 9068 size(4); 9069 format %{ "CMP $op1,$op2\t! compressed ptr" %} 9070 opcode(Assembler::subcc_op3, Assembler::arith_op); 9071 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) ); 9072 ins_pipe(ialu_cconly_reg_reg); 9073 %} 9074 9075 instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{ 9076 match(Set icc (CmpN op1 op2)); 9077 9078 size(4); 9079 format %{ "CMP $op1,$op2\t! compressed ptr" %} 9080 opcode(Assembler::subcc_op3, Assembler::arith_op); 9081 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) ); 9082 ins_pipe(ialu_cconly_reg_imm); 9083 %} 9084 9085 //----------Max and Min-------------------------------------------------------- 9086 // Min Instructions 9087 // Conditional move for min 9088 instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{ 9089 effect( USE_DEF op2, USE op1, USE icc ); 9090 9091 size(4); 9092 format %{ "MOVlt icc,$op1,$op2\t! min" %} 9093 opcode(Assembler::less); 9094 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 9095 ins_pipe(ialu_reg_flags); 9096 %} 9097 9098 // Min Register with Register. 9099 instruct minI_eReg(iRegI op1, iRegI op2) %{ 9100 match(Set op2 (MinI op1 op2)); 9101 ins_cost(DEFAULT_COST*2); 9102 expand %{ 9103 flagsReg icc; 9104 compI_iReg(icc,op1,op2); 9105 cmovI_reg_lt(op2,op1,icc); 9106 %} 9107 %} 9108 9109 // Max Instructions 9110 // Conditional move for max 9111 instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{ 9112 effect( USE_DEF op2, USE op1, USE icc ); 9113 format %{ "MOVgt icc,$op1,$op2\t! 
max" %} 9114 opcode(Assembler::greater); 9115 ins_encode( enc_cmov_reg_minmax(op2,op1) ); 9116 ins_pipe(ialu_reg_flags); 9117 %} 9118 9119 // Max Register with Register 9120 instruct maxI_eReg(iRegI op1, iRegI op2) %{ 9121 match(Set op2 (MaxI op1 op2)); 9122 ins_cost(DEFAULT_COST*2); 9123 expand %{ 9124 flagsReg icc; 9125 compI_iReg(icc,op1,op2); 9126 cmovI_reg_gt(op2,op1,icc); 9127 %} 9128 %} 9129 9130 9131 //----------Float Compares---------------------------------------------------- 9132 // Compare floating, generate condition code 9133 instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{ 9134 match(Set fcc (CmpF src1 src2)); 9135 9136 size(4); 9137 format %{ "FCMPs $fcc,$src1,$src2" %} 9138 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf); 9139 ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) ); 9140 ins_pipe(faddF_fcc_reg_reg_zero); 9141 %} 9142 9143 instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{ 9144 match(Set fcc (CmpD src1 src2)); 9145 9146 size(4); 9147 format %{ "FCMPd $fcc,$src1,$src2" %} 9148 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf); 9149 ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) ); 9150 ins_pipe(faddD_fcc_reg_reg_zero); 9151 %} 9152 9153 9154 // Compare floating, generate -1,0,1 9155 instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{ 9156 match(Set dst (CmpF3 src1 src2)); 9157 effect(KILL fcc0); 9158 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9159 format %{ "fcmpl $dst,$src1,$src2" %} 9160 // Primary = float 9161 opcode( true ); 9162 ins_encode( floating_cmp( dst, src1, src2 ) ); 9163 ins_pipe( floating_cmp ); 9164 %} 9165 9166 instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{ 9167 match(Set dst (CmpD3 src1 src2)); 9168 effect(KILL fcc0); 9169 ins_cost(DEFAULT_COST*3+BRANCH_COST*3); 9170 format %{ "dcmpl $dst,$src1,$src2" %} 9171 // Primary = double (not float) 9172 opcode( false ); 9173 ins_encode( floating_cmp( dst, src1, src2 
) ); 9174 ins_pipe( floating_cmp ); 9175 %} 9176 9177 //----------Branches--------------------------------------------------------- 9178 // Jump 9179 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above) 9180 instruct jumpXtnd(iRegX switch_val, o7RegI table) %{ 9181 match(Jump switch_val); 9182 effect(TEMP table); 9183 9184 ins_cost(350); 9185 9186 format %{ "ADD $constanttablebase, $constantoffset, O7\n\t" 9187 "LD [O7 + $switch_val], O7\n\t" 9188 "JUMP O7" %} 9189 ins_encode %{ 9190 // Calculate table address into a register. 9191 Register table_reg; 9192 Register label_reg = O7; 9193 // If we are calculating the size of this instruction don't trust 9194 // zero offsets because they might change when 9195 // MachConstantBaseNode decides to optimize the constant table 9196 // base. 9197 if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) { 9198 table_reg = $constanttablebase; 9199 } else { 9200 table_reg = O7; 9201 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7); 9202 __ add($constanttablebase, con_offset, table_reg); 9203 } 9204 9205 // Jump to base address + switch value 9206 __ ld_ptr(table_reg, $switch_val$$Register, label_reg); 9207 __ jmp(label_reg, G0); 9208 __ delayed()->nop(); 9209 %} 9210 ins_pipe(ialu_reg_reg); 9211 %} 9212 9213 // Direct Branch. Use V8 version with longer range. 9214 instruct branch(label labl) %{ 9215 match(Goto); 9216 effect(USE labl); 9217 9218 size(8); 9219 ins_cost(BRANCH_COST); 9220 format %{ "BA $labl" %} 9221 ins_encode %{ 9222 Label* L = $labl$$label; 9223 __ ba(*L); 9224 __ delayed()->nop(); 9225 %} 9226 ins_avoid_back_to_back(AVOID_BEFORE); 9227 ins_pipe(br); 9228 %} 9229 9230 // Direct Branch, short with no delay slot 9231 instruct branch_short(label labl) %{ 9232 match(Goto); 9233 predicate(UseCBCond); 9234 effect(USE labl); 9235 9236 size(4); 9237 ins_cost(BRANCH_COST); 9238 format %{ "BA $labl\t! 
short branch" %} 9239 ins_encode %{ 9240 Label* L = $labl$$label; 9241 assert(__ use_cbcond(*L), "back to back cbcond"); 9242 __ ba_short(*L); 9243 %} 9244 ins_short_branch(1); 9245 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9246 ins_pipe(cbcond_reg_imm); 9247 %} 9248 9249 // Conditional Direct Branch 9250 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{ 9251 match(If cmp icc); 9252 effect(USE labl); 9253 9254 size(8); 9255 ins_cost(BRANCH_COST); 9256 format %{ "BP$cmp $icc,$labl" %} 9257 // Prim = bits 24-22, Secnd = bits 31-30 9258 ins_encode( enc_bp( labl, cmp, icc ) ); 9259 ins_avoid_back_to_back(AVOID_BEFORE); 9260 ins_pipe(br_cc); 9261 %} 9262 9263 instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9264 match(If cmp icc); 9265 effect(USE labl); 9266 9267 ins_cost(BRANCH_COST); 9268 format %{ "BP$cmp $icc,$labl" %} 9269 // Prim = bits 24-22, Secnd = bits 31-30 9270 ins_encode( enc_bp( labl, cmp, icc ) ); 9271 ins_avoid_back_to_back(AVOID_BEFORE); 9272 ins_pipe(br_cc); 9273 %} 9274 9275 instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{ 9276 match(If cmp pcc); 9277 effect(USE labl); 9278 9279 size(8); 9280 ins_cost(BRANCH_COST); 9281 format %{ "BP$cmp $pcc,$labl" %} 9282 ins_encode %{ 9283 Label* L = $labl$$label; 9284 Assembler::Predict predict_taken = 9285 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9286 9287 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9288 __ delayed()->nop(); 9289 %} 9290 ins_avoid_back_to_back(AVOID_BEFORE); 9291 ins_pipe(br_cc); 9292 %} 9293 9294 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{ 9295 match(If cmp fcc); 9296 effect(USE labl); 9297 9298 size(8); 9299 ins_cost(BRANCH_COST); 9300 format %{ "FBP$cmp $fcc,$labl" %} 9301 ins_encode %{ 9302 Label* L = $labl$$label; 9303 Assembler::Predict predict_taken = 9304 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9305 9306 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L); 9307 __ delayed()->nop(); 9308 %} 9309 ins_avoid_back_to_back(AVOID_BEFORE); 9310 ins_pipe(br_fcc); 9311 %} 9312 9313 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{ 9314 match(CountedLoopEnd cmp icc); 9315 effect(USE labl); 9316 9317 size(8); 9318 ins_cost(BRANCH_COST); 9319 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9320 // Prim = bits 24-22, Secnd = bits 31-30 9321 ins_encode( enc_bp( labl, cmp, icc ) ); 9322 ins_avoid_back_to_back(AVOID_BEFORE); 9323 ins_pipe(br_cc); 9324 %} 9325 9326 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{ 9327 match(CountedLoopEnd cmp icc); 9328 effect(USE labl); 9329 9330 size(8); 9331 ins_cost(BRANCH_COST); 9332 format %{ "BP$cmp $icc,$labl\t! Loop end" %} 9333 // Prim = bits 24-22, Secnd = bits 31-30 9334 ins_encode( enc_bp( labl, cmp, icc ) ); 9335 ins_avoid_back_to_back(AVOID_BEFORE); 9336 ins_pipe(br_cc); 9337 %} 9338 9339 // Compare and branch instructions 9340 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9341 match(If cmp (CmpI op1 op2)); 9342 effect(USE labl, KILL icc); 9343 9344 size(12); 9345 ins_cost(BRANCH_COST); 9346 format %{ "CMP $op1,$op2\t! int\n\t" 9347 "BP$cmp $labl" %} 9348 ins_encode %{ 9349 Label* L = $labl$$label; 9350 Assembler::Predict predict_taken = 9351 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9352 __ cmp($op1$$Register, $op2$$Register); 9353 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9354 __ delayed()->nop(); 9355 %} 9356 ins_pipe(cmp_br_reg_reg); 9357 %} 9358 9359 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9360 match(If cmp (CmpI op1 op2)); 9361 effect(USE labl, KILL icc); 9362 9363 size(12); 9364 ins_cost(BRANCH_COST); 9365 format %{ "CMP $op1,$op2\t! 
int\n\t" 9366 "BP$cmp $labl" %} 9367 ins_encode %{ 9368 Label* L = $labl$$label; 9369 Assembler::Predict predict_taken = 9370 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9371 __ cmp($op1$$Register, $op2$$constant); 9372 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9373 __ delayed()->nop(); 9374 %} 9375 ins_pipe(cmp_br_reg_imm); 9376 %} 9377 9378 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9379 match(If cmp (CmpU op1 op2)); 9380 effect(USE labl, KILL icc); 9381 9382 size(12); 9383 ins_cost(BRANCH_COST); 9384 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9385 "BP$cmp $labl" %} 9386 ins_encode %{ 9387 Label* L = $labl$$label; 9388 Assembler::Predict predict_taken = 9389 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9390 __ cmp($op1$$Register, $op2$$Register); 9391 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9392 __ delayed()->nop(); 9393 %} 9394 ins_pipe(cmp_br_reg_reg); 9395 %} 9396 9397 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9398 match(If cmp (CmpU op1 op2)); 9399 effect(USE labl, KILL icc); 9400 9401 size(12); 9402 ins_cost(BRANCH_COST); 9403 format %{ "CMP $op1,$op2\t! unsigned\n\t" 9404 "BP$cmp $labl" %} 9405 ins_encode %{ 9406 Label* L = $labl$$label; 9407 Assembler::Predict predict_taken = 9408 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9409 __ cmp($op1$$Register, $op2$$constant); 9410 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9411 __ delayed()->nop(); 9412 %} 9413 ins_pipe(cmp_br_reg_imm); 9414 %} 9415 9416 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9417 match(If cmp (CmpL op1 op2)); 9418 effect(USE labl, KILL xcc); 9419 9420 size(12); 9421 ins_cost(BRANCH_COST); 9422 format %{ "CMP $op1,$op2\t! 
long\n\t" 9423 "BP$cmp $labl" %} 9424 ins_encode %{ 9425 Label* L = $labl$$label; 9426 Assembler::Predict predict_taken = 9427 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9428 __ cmp($op1$$Register, $op2$$Register); 9429 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9430 __ delayed()->nop(); 9431 %} 9432 ins_pipe(cmp_br_reg_reg); 9433 %} 9434 9435 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9436 match(If cmp (CmpL op1 op2)); 9437 effect(USE labl, KILL xcc); 9438 9439 size(12); 9440 ins_cost(BRANCH_COST); 9441 format %{ "CMP $op1,$op2\t! long\n\t" 9442 "BP$cmp $labl" %} 9443 ins_encode %{ 9444 Label* L = $labl$$label; 9445 Assembler::Predict predict_taken = 9446 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9447 __ cmp($op1$$Register, $op2$$constant); 9448 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9449 __ delayed()->nop(); 9450 %} 9451 ins_pipe(cmp_br_reg_imm); 9452 %} 9453 9454 // Compare Pointers and branch 9455 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9456 match(If cmp (CmpP op1 op2)); 9457 effect(USE labl, KILL pcc); 9458 9459 size(12); 9460 ins_cost(BRANCH_COST); 9461 format %{ "CMP $op1,$op2\t! ptr\n\t" 9462 "B$cmp $labl" %} 9463 ins_encode %{ 9464 Label* L = $labl$$label; 9465 Assembler::Predict predict_taken = 9466 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9467 __ cmp($op1$$Register, $op2$$Register); 9468 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9469 __ delayed()->nop(); 9470 %} 9471 ins_pipe(cmp_br_reg_reg); 9472 %} 9473 9474 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9475 match(If cmp (CmpP op1 null)); 9476 effect(USE labl, KILL pcc); 9477 9478 size(12); 9479 ins_cost(BRANCH_COST); 9480 format %{ "CMP $op1,0\t! 
ptr\n\t" 9481 "B$cmp $labl" %} 9482 ins_encode %{ 9483 Label* L = $labl$$label; 9484 Assembler::Predict predict_taken = 9485 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9486 __ cmp($op1$$Register, G0); 9487 // bpr() is not used here since it has shorter distance. 9488 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L); 9489 __ delayed()->nop(); 9490 %} 9491 ins_pipe(cmp_br_reg_reg); 9492 %} 9493 9494 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9495 match(If cmp (CmpN op1 op2)); 9496 effect(USE labl, KILL icc); 9497 9498 size(12); 9499 ins_cost(BRANCH_COST); 9500 format %{ "CMP $op1,$op2\t! compressed ptr\n\t" 9501 "BP$cmp $labl" %} 9502 ins_encode %{ 9503 Label* L = $labl$$label; 9504 Assembler::Predict predict_taken = 9505 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9506 __ cmp($op1$$Register, $op2$$Register); 9507 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9508 __ delayed()->nop(); 9509 %} 9510 ins_pipe(cmp_br_reg_reg); 9511 %} 9512 9513 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9514 match(If cmp (CmpN op1 null)); 9515 effect(USE labl, KILL icc); 9516 9517 size(12); 9518 ins_cost(BRANCH_COST); 9519 format %{ "CMP $op1,0\t! compressed ptr\n\t" 9520 "BP$cmp $labl" %} 9521 ins_encode %{ 9522 Label* L = $labl$$label; 9523 Assembler::Predict predict_taken = 9524 cbuf.is_backward_branch(*L) ? 
Assembler::pt : Assembler::pn; 9525 __ cmp($op1$$Register, G0); 9526 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9527 __ delayed()->nop(); 9528 %} 9529 ins_pipe(cmp_br_reg_reg); 9530 %} 9531 9532 // Loop back branch 9533 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9534 match(CountedLoopEnd cmp (CmpI op1 op2)); 9535 effect(USE labl, KILL icc); 9536 9537 size(12); 9538 ins_cost(BRANCH_COST); 9539 format %{ "CMP $op1,$op2\t! int\n\t" 9540 "BP$cmp $labl\t! Loop end" %} 9541 ins_encode %{ 9542 Label* L = $labl$$label; 9543 Assembler::Predict predict_taken = 9544 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9545 __ cmp($op1$$Register, $op2$$Register); 9546 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9547 __ delayed()->nop(); 9548 %} 9549 ins_pipe(cmp_br_reg_reg); 9550 %} 9551 9552 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9553 match(CountedLoopEnd cmp (CmpI op1 op2)); 9554 effect(USE labl, KILL icc); 9555 9556 size(12); 9557 ins_cost(BRANCH_COST); 9558 format %{ "CMP $op1,$op2\t! int\n\t" 9559 "BP$cmp $labl\t! Loop end" %} 9560 ins_encode %{ 9561 Label* L = $labl$$label; 9562 Assembler::Predict predict_taken = 9563 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9564 __ cmp($op1$$Register, $op2$$constant); 9565 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L); 9566 __ delayed()->nop(); 9567 %} 9568 ins_pipe(cmp_br_reg_imm); 9569 %} 9570 9571 // Short compare and branch instructions 9572 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9573 match(If cmp (CmpI op1 op2)); 9574 predicate(UseCBCond); 9575 effect(USE labl, KILL icc); 9576 9577 size(4); 9578 ins_cost(BRANCH_COST); 9579 format %{ "CWB$cmp $op1,$op2,$labl\t! 
int" %} 9580 ins_encode %{ 9581 Label* L = $labl$$label; 9582 assert(__ use_cbcond(*L), "back to back cbcond"); 9583 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9584 %} 9585 ins_short_branch(1); 9586 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9587 ins_pipe(cbcond_reg_reg); 9588 %} 9589 9590 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9591 match(If cmp (CmpI op1 op2)); 9592 predicate(UseCBCond); 9593 effect(USE labl, KILL icc); 9594 9595 size(4); 9596 ins_cost(BRANCH_COST); 9597 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %} 9598 ins_encode %{ 9599 Label* L = $labl$$label; 9600 assert(__ use_cbcond(*L), "back to back cbcond"); 9601 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9602 %} 9603 ins_short_branch(1); 9604 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9605 ins_pipe(cbcond_reg_imm); 9606 %} 9607 9608 instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{ 9609 match(If cmp (CmpU op1 op2)); 9610 predicate(UseCBCond); 9611 effect(USE labl, KILL icc); 9612 9613 size(4); 9614 ins_cost(BRANCH_COST); 9615 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %} 9616 ins_encode %{ 9617 Label* L = $labl$$label; 9618 assert(__ use_cbcond(*L), "back to back cbcond"); 9619 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9620 %} 9621 ins_short_branch(1); 9622 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9623 ins_pipe(cbcond_reg_reg); 9624 %} 9625 9626 instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{ 9627 match(If cmp (CmpU op1 op2)); 9628 predicate(UseCBCond); 9629 effect(USE labl, KILL icc); 9630 9631 size(4); 9632 ins_cost(BRANCH_COST); 9633 format %{ "CWB$cmp $op1,$op2,$labl\t! 
unsigned" %} 9634 ins_encode %{ 9635 Label* L = $labl$$label; 9636 assert(__ use_cbcond(*L), "back to back cbcond"); 9637 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9638 %} 9639 ins_short_branch(1); 9640 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9641 ins_pipe(cbcond_reg_imm); 9642 %} 9643 9644 instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{ 9645 match(If cmp (CmpL op1 op2)); 9646 predicate(UseCBCond); 9647 effect(USE labl, KILL xcc); 9648 9649 size(4); 9650 ins_cost(BRANCH_COST); 9651 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %} 9652 ins_encode %{ 9653 Label* L = $labl$$label; 9654 assert(__ use_cbcond(*L), "back to back cbcond"); 9655 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L); 9656 %} 9657 ins_short_branch(1); 9658 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9659 ins_pipe(cbcond_reg_reg); 9660 %} 9661 9662 instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{ 9663 match(If cmp (CmpL op1 op2)); 9664 predicate(UseCBCond); 9665 effect(USE labl, KILL xcc); 9666 9667 size(4); 9668 ins_cost(BRANCH_COST); 9669 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %} 9670 ins_encode %{ 9671 Label* L = $labl$$label; 9672 assert(__ use_cbcond(*L), "back to back cbcond"); 9673 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L); 9674 %} 9675 ins_short_branch(1); 9676 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9677 ins_pipe(cbcond_reg_imm); 9678 %} 9679 9680 // Compare Pointers and branch 9681 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{ 9682 match(If cmp (CmpP op1 op2)); 9683 predicate(UseCBCond); 9684 effect(USE labl, KILL pcc); 9685 9686 size(4); 9687 ins_cost(BRANCH_COST); 9688 #ifdef _LP64 9689 format %{ "CXB$cmp $op1,$op2,$labl\t! 
ptr" %} 9690 #else 9691 format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %} 9692 #endif 9693 ins_encode %{ 9694 Label* L = $labl$$label; 9695 assert(__ use_cbcond(*L), "back to back cbcond"); 9696 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L); 9697 %} 9698 ins_short_branch(1); 9699 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9700 ins_pipe(cbcond_reg_reg); 9701 %} 9702 9703 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{ 9704 match(If cmp (CmpP op1 null)); 9705 predicate(UseCBCond); 9706 effect(USE labl, KILL pcc); 9707 9708 size(4); 9709 ins_cost(BRANCH_COST); 9710 #ifdef _LP64 9711 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %} 9712 #else 9713 format %{ "CWB$cmp $op1,0,$labl\t! ptr" %} 9714 #endif 9715 ins_encode %{ 9716 Label* L = $labl$$label; 9717 assert(__ use_cbcond(*L), "back to back cbcond"); 9718 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L); 9719 %} 9720 ins_short_branch(1); 9721 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9722 ins_pipe(cbcond_reg_reg); 9723 %} 9724 9725 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{ 9726 match(If cmp (CmpN op1 op2)); 9727 predicate(UseCBCond); 9728 effect(USE labl, KILL icc); 9729 9730 size(4); 9731 ins_cost(BRANCH_COST); 9732 format %{ "CWB$cmp $op1,$op2,$labl\t! 
compressed ptr" %} 9733 ins_encode %{ 9734 Label* L = $labl$$label; 9735 assert(__ use_cbcond(*L), "back to back cbcond"); 9736 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9737 %} 9738 ins_short_branch(1); 9739 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9740 ins_pipe(cbcond_reg_reg); 9741 %} 9742 9743 instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{ 9744 match(If cmp (CmpN op1 null)); 9745 predicate(UseCBCond); 9746 effect(USE labl, KILL icc); 9747 9748 size(4); 9749 ins_cost(BRANCH_COST); 9750 format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %} 9751 ins_encode %{ 9752 Label* L = $labl$$label; 9753 assert(__ use_cbcond(*L), "back to back cbcond"); 9754 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L); 9755 %} 9756 ins_short_branch(1); 9757 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9758 ins_pipe(cbcond_reg_reg); 9759 %} 9760 9761 // Loop back branch 9762 instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{ 9763 match(CountedLoopEnd cmp (CmpI op1 op2)); 9764 predicate(UseCBCond); 9765 effect(USE labl, KILL icc); 9766 9767 size(4); 9768 ins_cost(BRANCH_COST); 9769 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %} 9770 ins_encode %{ 9771 Label* L = $labl$$label; 9772 assert(__ use_cbcond(*L), "back to back cbcond"); 9773 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L); 9774 %} 9775 ins_short_branch(1); 9776 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9777 ins_pipe(cbcond_reg_reg); 9778 %} 9779 9780 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{ 9781 match(CountedLoopEnd cmp (CmpI op1 op2)); 9782 predicate(UseCBCond); 9783 effect(USE labl, KILL icc); 9784 9785 size(4); 9786 ins_cost(BRANCH_COST); 9787 format %{ "CWB$cmp $op1,$op2,$labl\t! 
Loop end" %} 9788 ins_encode %{ 9789 Label* L = $labl$$label; 9790 assert(__ use_cbcond(*L), "back to back cbcond"); 9791 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L); 9792 %} 9793 ins_short_branch(1); 9794 ins_avoid_back_to_back(AVOID_BEFORE_AND_AFTER); 9795 ins_pipe(cbcond_reg_imm); 9796 %} 9797 9798 // Branch-on-register tests all 64 bits. We assume that values 9799 // in 64-bit registers always remains zero or sign extended 9800 // unless our code munges the high bits. Interrupts can chop 9801 // the high order bits to zero or sign at any time. 9802 instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{ 9803 match(If cmp (CmpI op1 zero)); 9804 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9805 effect(USE labl); 9806 9807 size(8); 9808 ins_cost(BRANCH_COST); 9809 format %{ "BR$cmp $op1,$labl" %} 9810 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9811 ins_avoid_back_to_back(AVOID_BEFORE); 9812 ins_pipe(br_reg); 9813 %} 9814 9815 instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{ 9816 match(If cmp (CmpP op1 null)); 9817 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9818 effect(USE labl); 9819 9820 size(8); 9821 ins_cost(BRANCH_COST); 9822 format %{ "BR$cmp $op1,$labl" %} 9823 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9824 ins_avoid_back_to_back(AVOID_BEFORE); 9825 ins_pipe(br_reg); 9826 %} 9827 9828 instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{ 9829 match(If cmp (CmpL op1 zero)); 9830 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf)); 9831 effect(USE labl); 9832 9833 size(8); 9834 ins_cost(BRANCH_COST); 9835 format %{ "BR$cmp $op1,$labl" %} 9836 ins_encode( enc_bpr( labl, cmp, op1 ) ); 9837 ins_avoid_back_to_back(AVOID_BEFORE); 9838 ins_pipe(br_reg); 9839 %} 9840 9841 9842 // ============================================================================ 9843 // Long Compare 9844 // 
9845 // Currently we hold longs in 2 registers. Comparing such values efficiently 9846 // is tricky. The flavor of compare used depends on whether we are testing 9847 // for LT, LE, or EQ. For a simple LT test we can check just the sign bit. 9848 // The GE test is the negated LT test. The LE test can be had by commuting 9849 // the operands (yielding a GE test) and then negating; negate again for the 9850 // GT test. The EQ test is done by ORcc'ing the high and low halves, and the 9851 // NE test is negated from that. 9852 9853 // Due to a shortcoming in the ADLC, it mixes up expressions like: 9854 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the 9855 // difference between 'Y' and '0L'. The tree-matches for the CmpI sections 9856 // are collapsed internally in the ADLC's dfa-gen code. The match for 9857 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the 9858 // foo match ends up with the wrong leaf. One fix is to not match both 9859 // reg-reg and reg-zero forms of long-compare. This is unfortunate because 9860 // both forms beat the trinary form of long-compare and both are very useful 9861 // on Intel which has so few registers. 9862 9863 instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{ 9864 match(If cmp xcc); 9865 effect(USE labl); 9866 9867 size(8); 9868 ins_cost(BRANCH_COST); 9869 format %{ "BP$cmp $xcc,$labl" %} 9870 ins_encode %{ 9871 Label* L = $labl$$label; 9872 Assembler::Predict predict_taken = 9873 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn; 9874 9875 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L); 9876 __ delayed()->nop(); 9877 %} 9878 ins_avoid_back_to_back(AVOID_BEFORE); 9879 ins_pipe(br_cc); 9880 %} 9881 9882 // Manifest a CmpL3 result in an integer register. Very painful. 9883 // This is the test to avoid. 
9884 instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{ 9885 match(Set dst (CmpL3 src1 src2) ); 9886 effect( KILL ccr ); 9887 ins_cost(6*DEFAULT_COST); 9888 size(24); 9889 format %{ "CMP $src1,$src2\t\t! long\n" 9890 "\tBLT,a,pn done\n" 9891 "\tMOV -1,$dst\t! delay slot\n" 9892 "\tBGT,a,pn done\n" 9893 "\tMOV 1,$dst\t! delay slot\n" 9894 "\tCLR $dst\n" 9895 "done:" %} 9896 ins_encode( cmpl_flag(src1,src2,dst) ); 9897 ins_pipe(cmpL_reg); 9898 %} 9899 9900 // Conditional move 9901 instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{ 9902 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src))); 9903 ins_cost(150); 9904 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %} 9905 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) ); 9906 ins_pipe(ialu_reg); 9907 %} 9908 9909 instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{ 9910 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src))); 9911 ins_cost(140); 9912 format %{ "MOV$cmp $xcc,$src,$dst\t! 
long" %} 9913 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) ); 9914 ins_pipe(ialu_imm); 9915 %} 9916 9917 instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{ 9918 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src))); 9919 ins_cost(150); 9920 format %{ "MOV$cmp $xcc,$src,$dst" %} 9921 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) ); 9922 ins_pipe(ialu_reg); 9923 %} 9924 9925 instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{ 9926 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src))); 9927 ins_cost(140); 9928 format %{ "MOV$cmp $xcc,$src,$dst" %} 9929 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) ); 9930 ins_pipe(ialu_imm); 9931 %} 9932 9933 instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{ 9934 match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src))); 9935 ins_cost(150); 9936 format %{ "MOV$cmp $xcc,$src,$dst" %} 9937 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) ); 9938 ins_pipe(ialu_reg); 9939 %} 9940 9941 instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{ 9942 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src))); 9943 ins_cost(150); 9944 format %{ "MOV$cmp $xcc,$src,$dst" %} 9945 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) ); 9946 ins_pipe(ialu_reg); 9947 %} 9948 9949 instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{ 9950 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src))); 9951 ins_cost(140); 9952 format %{ "MOV$cmp $xcc,$src,$dst" %} 9953 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) ); 9954 ins_pipe(ialu_imm); 9955 %} 9956 9957 instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{ 9958 match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src))); 9959 ins_cost(150); 9960 opcode(0x101); 9961 format %{ "FMOVS$cmp $xcc,$src,$dst" %} 9962 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) ); 9963 ins_pipe(int_conditional_float_move); 9964 %} 9965 9966 instruct 
cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{ 9967 match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src))); 9968 ins_cost(150); 9969 opcode(0x102); 9970 format %{ "FMOVD$cmp $xcc,$src,$dst" %} 9971 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) ); 9972 ins_pipe(int_conditional_float_move); 9973 %} 9974 9975 // ============================================================================ 9976 // Safepoint Instruction 9977 instruct safePoint_poll(iRegP poll) %{ 9978 match(SafePoint poll); 9979 effect(USE poll); 9980 9981 size(4); 9982 #ifdef _LP64 9983 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %} 9984 #else 9985 format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %} 9986 #endif 9987 ins_encode %{ 9988 __ relocate(relocInfo::poll_type); 9989 __ ld_ptr($poll$$Register, 0, G0); 9990 %} 9991 ins_pipe(loadPollP); 9992 %} 9993 9994 // ============================================================================ 9995 // Call Instructions 9996 // Call Java Static Instruction 9997 instruct CallStaticJavaDirect( method meth ) %{ 9998 match(CallStaticJava); 9999 predicate(! 
((CallStaticJavaNode*)n)->is_method_handle_invoke()); 10000 effect(USE meth); 10001 10002 size(8); 10003 ins_cost(CALL_COST); 10004 format %{ "CALL,static ; NOP ==> " %} 10005 ins_encode( Java_Static_Call( meth ), call_epilog ); 10006 ins_avoid_back_to_back(AVOID_BEFORE); 10007 ins_pipe(simple_call); 10008 %} 10009 10010 // Call Java Static Instruction (method handle version) 10011 instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{ 10012 match(CallStaticJava); 10013 predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke()); 10014 effect(USE meth, KILL l7_mh_SP_save); 10015 10016 size(16); 10017 ins_cost(CALL_COST); 10018 format %{ "CALL,static/MethodHandle" %} 10019 ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog); 10020 ins_pipe(simple_call); 10021 %} 10022 10023 // Call Java Dynamic Instruction 10024 instruct CallDynamicJavaDirect( method meth ) %{ 10025 match(CallDynamicJava); 10026 effect(USE meth); 10027 10028 ins_cost(CALL_COST); 10029 format %{ "SET (empty),R_G5\n\t" 10030 "CALL,dynamic ; NOP ==> " %} 10031 ins_encode( Java_Dynamic_Call( meth ), call_epilog ); 10032 ins_pipe(call); 10033 %} 10034 10035 // Call Runtime Instruction 10036 instruct CallRuntimeDirect(method meth, l7RegP l7) %{ 10037 match(CallRuntime); 10038 effect(USE meth, KILL l7); 10039 ins_cost(CALL_COST); 10040 format %{ "CALL,runtime" %} 10041 ins_encode( Java_To_Runtime( meth ), 10042 call_epilog, adjust_long_from_native_call ); 10043 ins_avoid_back_to_back(AVOID_BEFORE); 10044 ins_pipe(simple_call); 10045 %} 10046 10047 // Call runtime without safepoint - same as CallRuntime 10048 instruct CallLeafDirect(method meth, l7RegP l7) %{ 10049 match(CallLeaf); 10050 effect(USE meth, KILL l7); 10051 ins_cost(CALL_COST); 10052 format %{ "CALL,runtime leaf" %} 10053 ins_encode( Java_To_Runtime( meth ), 10054 call_epilog, 10055 adjust_long_from_native_call ); 10056 ins_avoid_back_to_back(AVOID_BEFORE); 10057 ins_pipe(simple_call); 10058 %} 10059 10060 
// Call runtime without safepoint - same as CallLeaf 10061 instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{ 10062 match(CallLeafNoFP); 10063 effect(USE meth, KILL l7); 10064 ins_cost(CALL_COST); 10065 format %{ "CALL,runtime leaf nofp" %} 10066 ins_encode( Java_To_Runtime( meth ), 10067 call_epilog, 10068 adjust_long_from_native_call ); 10069 ins_avoid_back_to_back(AVOID_BEFORE); 10070 ins_pipe(simple_call); 10071 %} 10072 10073 // Tail Call; Jump from runtime stub to Java code. 10074 // Also known as an 'interprocedural jump'. 10075 // Target of jump will eventually return to caller. 10076 // TailJump below removes the return address. 10077 instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{ 10078 match(TailCall jump_target method_oop ); 10079 10080 ins_cost(CALL_COST); 10081 format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %} 10082 ins_encode(form_jmpl(jump_target)); 10083 ins_avoid_back_to_back(AVOID_BEFORE); 10084 ins_pipe(tail_call); 10085 %} 10086 10087 10088 // Return Instruction 10089 instruct Ret() %{ 10090 match(Return); 10091 10092 // The epilogue node did the ret already. 10093 size(0); 10094 format %{ "! return" %} 10095 ins_encode(); 10096 ins_pipe(empty); 10097 %} 10098 10099 10100 // Tail Jump; remove the return address; jump to target. 10101 // TailCall above leaves the return address around. 10102 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2). 10103 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a 10104 // "restore" before this instruction (in Epilogue), we need to materialize it 10105 // in %i0. 10106 instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{ 10107 match( TailJump jump_target ex_oop ); 10108 ins_cost(CALL_COST); 10109 format %{ "! discard R_O7\n\t" 10110 "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. 
oop" %} 10111 ins_encode(form_jmpl_set_exception_pc(jump_target)); 10112 // opcode(Assembler::jmpl_op3, Assembler::arith_op); 10113 // The hack duplicates the exception oop into G3, so that CreateEx can use it there. 10114 // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() ); 10115 ins_avoid_back_to_back(AVOID_BEFORE); 10116 ins_pipe(tail_call); 10117 %} 10118 10119 // Create exception oop: created by stack-crawling runtime code. 10120 // Created exception is now available to this handler, and is setup 10121 // just prior to jumping to this handler. No code emitted. 10122 instruct CreateException( o0RegP ex_oop ) 10123 %{ 10124 match(Set ex_oop (CreateEx)); 10125 ins_cost(0); 10126 10127 size(0); 10128 // use the following format syntax 10129 format %{ "! exception oop is in R_O0; no code emitted" %} 10130 ins_encode(); 10131 ins_pipe(empty); 10132 %} 10133 10134 10135 // Rethrow exception: 10136 // The exception oop will come in the first argument position. 10137 // Then JUMP (not call) to the rethrow stub code. 10138 instruct RethrowException() 10139 %{ 10140 match(Rethrow); 10141 ins_cost(CALL_COST); 10142 10143 // use the following format syntax 10144 format %{ "Jmp rethrow_stub" %} 10145 ins_encode(enc_rethrow); 10146 ins_avoid_back_to_back(AVOID_BEFORE); 10147 ins_pipe(tail_call); 10148 %} 10149 10150 10151 // Die now 10152 instruct ShouldNotReachHere( ) 10153 %{ 10154 match(Halt); 10155 ins_cost(CALL_COST); 10156 10157 size(4); 10158 // Use the following format syntax 10159 format %{ "ILLTRAP ; ShouldNotReachHere" %} 10160 ins_encode( form2_illtrap() ); 10161 ins_pipe(tail_call); 10162 %} 10163 10164 // ============================================================================ 10165 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass 10166 // array for an instance of the superklass. Set a hidden internal cache on a 10167 // hit (cache is checked with exposed code in gen_subtype_check()). 
Return 10168 // not zero for a miss or zero for a hit. The encoding ALSO sets flags. 10169 instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{ 10170 match(Set index (PartialSubtypeCheck sub super)); 10171 effect( KILL pcc, KILL o7 ); 10172 ins_cost(DEFAULT_COST*10); 10173 format %{ "CALL PartialSubtypeCheck\n\tNOP" %} 10174 ins_encode( enc_PartialSubtypeCheck() ); 10175 ins_avoid_back_to_back(AVOID_BEFORE); 10176 ins_pipe(partial_subtype_check_pipe); 10177 %} 10178 10179 instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{ 10180 match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero)); 10181 effect( KILL idx, KILL o7 ); 10182 ins_cost(DEFAULT_COST*10); 10183 format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %} 10184 ins_encode( enc_PartialSubtypeCheck() ); 10185 ins_avoid_back_to_back(AVOID_BEFORE); 10186 ins_pipe(partial_subtype_check_pipe); 10187 %} 10188 10189 10190 // ============================================================================ 10191 // inlined locking and unlocking 10192 10193 instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{ 10194 match(Set pcc (FastLock object box)); 10195 10196 effect(TEMP scratch2, USE_KILL box, KILL scratch); 10197 ins_cost(100); 10198 10199 format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %} 10200 ins_encode( Fast_Lock(object, box, scratch, scratch2) ); 10201 ins_pipe(long_memory_op); 10202 %} 10203 10204 10205 instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{ 10206 match(Set pcc (FastUnlock object box)); 10207 effect(TEMP scratch2, USE_KILL box, KILL scratch); 10208 ins_cost(100); 10209 10210 format %{ "FASTUNLOCK $object,$box\t! 
kills $box,$scratch,$scratch2" %} 10211 ins_encode( Fast_Unlock(object, box, scratch, scratch2) ); 10212 ins_pipe(long_memory_op); 10213 %} 10214 10215 // The encodings are generic. 10216 instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{ 10217 predicate(!use_block_zeroing(n->in(2)) ); 10218 match(Set dummy (ClearArray cnt base)); 10219 effect(TEMP temp, KILL ccr); 10220 ins_cost(300); 10221 format %{ "MOV $cnt,$temp\n" 10222 "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n" 10223 " BRge loop\t\t! Clearing loop\n" 10224 " STX G0,[$base+$temp]\t! delay slot" %} 10225 10226 ins_encode %{ 10227 // Compiler ensures base is doubleword aligned and cnt is count of doublewords 10228 Register nof_bytes_arg = $cnt$$Register; 10229 Register nof_bytes_tmp = $temp$$Register; 10230 Register base_pointer_arg = $base$$Register; 10231 10232 Label loop; 10233 __ mov(nof_bytes_arg, nof_bytes_tmp); 10234 10235 // Loop and clear, walking backwards through the array. 10236 // nof_bytes_tmp (if >0) is always the number of bytes to zero 10237 __ bind(loop); 10238 __ deccc(nof_bytes_tmp, 8); 10239 __ br(Assembler::greaterEqual, true, Assembler::pt, loop); 10240 __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp); 10241 // %%%% this mini-loop must not cross a cache boundary! 10242 %} 10243 ins_pipe(long_memory_op); 10244 %} 10245 10246 instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{ 10247 predicate(use_block_zeroing(n->in(2))); 10248 match(Set dummy (ClearArray cnt base)); 10249 effect(USE_KILL cnt, USE_KILL base, KILL ccr); 10250 ins_cost(300); 10251 format %{ "CLEAR [$base, $cnt]\t! 
ClearArray" %} 10252 10253 ins_encode %{ 10254 10255 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation"); 10256 Register to = $base$$Register; 10257 Register count = $cnt$$Register; 10258 10259 Label Ldone; 10260 __ nop(); // Separate short branches 10261 // Use BIS for zeroing (temp is not used). 10262 __ bis_zeroing(to, count, G0, Ldone); 10263 __ bind(Ldone); 10264 10265 %} 10266 ins_pipe(long_memory_op); 10267 %} 10268 10269 instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{ 10270 predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit)); 10271 match(Set dummy (ClearArray cnt base)); 10272 effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr); 10273 ins_cost(300); 10274 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %} 10275 10276 ins_encode %{ 10277 10278 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation"); 10279 Register to = $base$$Register; 10280 Register count = $cnt$$Register; 10281 Register temp = $tmp$$Register; 10282 10283 Label Ldone; 10284 __ nop(); // Separate short branches 10285 // Use BIS for zeroing 10286 __ bis_zeroing(to, count, temp, Ldone); 10287 __ bind(Ldone); 10288 10289 %} 10290 ins_pipe(long_memory_op); 10291 %} 10292 10293 instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result, 10294 o7RegI tmp, flagsReg ccr) %{ 10295 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 10296 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp); 10297 ins_cost(300); 10298 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %} 10299 ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) ); 10300 ins_pipe(long_memory_op); 10301 %} 10302 10303 instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result, 10304 o7RegI tmp, flagsReg ccr) %{ 10305 match(Set result (StrEquals (Binary str1 
str2) cnt)); 10306 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr); 10307 ins_cost(300); 10308 format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %} 10309 ins_encode( enc_String_Equals(str1, str2, cnt, result) ); 10310 ins_pipe(long_memory_op); 10311 %} 10312 10313 instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result, 10314 o7RegI tmp2, flagsReg ccr) %{ 10315 match(Set result (AryEq ary1 ary2)); 10316 effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr); 10317 ins_cost(300); 10318 format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %} 10319 ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result)); 10320 ins_pipe(long_memory_op); 10321 %} 10322 10323 10324 //---------- Zeros Count Instructions ------------------------------------------ 10325 10326 instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{ 10327 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported 10328 match(Set dst (CountLeadingZerosI src)); 10329 effect(TEMP dst, TEMP tmp, KILL cr); 10330 10331 // x |= (x >> 1); 10332 // x |= (x >> 2); 10333 // x |= (x >> 4); 10334 // x |= (x >> 8); 10335 // x |= (x >> 16); 10336 // return (WORDBITS - popc(x)); 10337 format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t" 10338 "SRL $src,0,$dst\t! 
32-bit zero extend\n\t" 10339 "OR $dst,$tmp,$dst\n\t" 10340 "SRL $dst,2,$tmp\n\t" 10341 "OR $dst,$tmp,$dst\n\t" 10342 "SRL $dst,4,$tmp\n\t" 10343 "OR $dst,$tmp,$dst\n\t" 10344 "SRL $dst,8,$tmp\n\t" 10345 "OR $dst,$tmp,$dst\n\t" 10346 "SRL $dst,16,$tmp\n\t" 10347 "OR $dst,$tmp,$dst\n\t" 10348 "POPC $dst,$dst\n\t" 10349 "MOV 32,$tmp\n\t" 10350 "SUB $tmp,$dst,$dst" %} 10351 ins_encode %{ 10352 Register Rdst = $dst$$Register; 10353 Register Rsrc = $src$$Register; 10354 Register Rtmp = $tmp$$Register; 10355 __ srl(Rsrc, 1, Rtmp); 10356 __ srl(Rsrc, 0, Rdst); 10357 __ or3(Rdst, Rtmp, Rdst); 10358 __ srl(Rdst, 2, Rtmp); 10359 __ or3(Rdst, Rtmp, Rdst); 10360 __ srl(Rdst, 4, Rtmp); 10361 __ or3(Rdst, Rtmp, Rdst); 10362 __ srl(Rdst, 8, Rtmp); 10363 __ or3(Rdst, Rtmp, Rdst); 10364 __ srl(Rdst, 16, Rtmp); 10365 __ or3(Rdst, Rtmp, Rdst); 10366 __ popc(Rdst, Rdst); 10367 __ mov(BitsPerInt, Rtmp); 10368 __ sub(Rtmp, Rdst, Rdst); 10369 %} 10370 ins_pipe(ialu_reg); 10371 %} 10372 10373 instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{ 10374 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported 10375 match(Set dst (CountLeadingZerosL src)); 10376 effect(TEMP dst, TEMP tmp, KILL cr); 10377 10378 // x |= (x >> 1); 10379 // x |= (x >> 2); 10380 // x |= (x >> 4); 10381 // x |= (x >> 8); 10382 // x |= (x >> 16); 10383 // x |= (x >> 32); 10384 // return (WORDBITS - popc(x)); 10385 format %{ "SRLX $src,1,$tmp\t! 
count leading zeros (long)\n\t" 10386 "OR $src,$tmp,$dst\n\t" 10387 "SRLX $dst,2,$tmp\n\t" 10388 "OR $dst,$tmp,$dst\n\t" 10389 "SRLX $dst,4,$tmp\n\t" 10390 "OR $dst,$tmp,$dst\n\t" 10391 "SRLX $dst,8,$tmp\n\t" 10392 "OR $dst,$tmp,$dst\n\t" 10393 "SRLX $dst,16,$tmp\n\t" 10394 "OR $dst,$tmp,$dst\n\t" 10395 "SRLX $dst,32,$tmp\n\t" 10396 "OR $dst,$tmp,$dst\n\t" 10397 "POPC $dst,$dst\n\t" 10398 "MOV 64,$tmp\n\t" 10399 "SUB $tmp,$dst,$dst" %} 10400 ins_encode %{ 10401 Register Rdst = $dst$$Register; 10402 Register Rsrc = $src$$Register; 10403 Register Rtmp = $tmp$$Register; 10404 __ srlx(Rsrc, 1, Rtmp); 10405 __ or3( Rsrc, Rtmp, Rdst); 10406 __ srlx(Rdst, 2, Rtmp); 10407 __ or3( Rdst, Rtmp, Rdst); 10408 __ srlx(Rdst, 4, Rtmp); 10409 __ or3( Rdst, Rtmp, Rdst); 10410 __ srlx(Rdst, 8, Rtmp); 10411 __ or3( Rdst, Rtmp, Rdst); 10412 __ srlx(Rdst, 16, Rtmp); 10413 __ or3( Rdst, Rtmp, Rdst); 10414 __ srlx(Rdst, 32, Rtmp); 10415 __ or3( Rdst, Rtmp, Rdst); 10416 __ popc(Rdst, Rdst); 10417 __ mov(BitsPerLong, Rtmp); 10418 __ sub(Rtmp, Rdst, Rdst); 10419 %} 10420 ins_pipe(ialu_reg); 10421 %} 10422 10423 instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{ 10424 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported 10425 match(Set dst (CountTrailingZerosI src)); 10426 effect(TEMP dst, KILL cr); 10427 10428 // return popc(~x & (x - 1)); 10429 format %{ "SUB $src,1,$dst\t! 
count trailing zeros (int)\n\t" 10430 "ANDN $dst,$src,$dst\n\t" 10431 "SRL $dst,R_G0,$dst\n\t" 10432 "POPC $dst,$dst" %} 10433 ins_encode %{ 10434 Register Rdst = $dst$$Register; 10435 Register Rsrc = $src$$Register; 10436 __ sub(Rsrc, 1, Rdst); 10437 __ andn(Rdst, Rsrc, Rdst); 10438 __ srl(Rdst, G0, Rdst); 10439 __ popc(Rdst, Rdst); 10440 %} 10441 ins_pipe(ialu_reg); 10442 %} 10443 10444 instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{ 10445 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported 10446 match(Set dst (CountTrailingZerosL src)); 10447 effect(TEMP dst, KILL cr); 10448 10449 // return popc(~x & (x - 1)); 10450 format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t" 10451 "ANDN $dst,$src,$dst\n\t" 10452 "POPC $dst,$dst" %} 10453 ins_encode %{ 10454 Register Rdst = $dst$$Register; 10455 Register Rsrc = $src$$Register; 10456 __ sub(Rsrc, 1, Rdst); 10457 __ andn(Rdst, Rsrc, Rdst); 10458 __ popc(Rdst, Rdst); 10459 %} 10460 ins_pipe(ialu_reg); 10461 %} 10462 10463 10464 //---------- Population Count Instructions ------------------------------------- 10465 10466 instruct popCountI(iRegIsafe dst, iRegI src) %{ 10467 predicate(UsePopCountInstruction); 10468 match(Set dst (PopCountI src)); 10469 10470 format %{ "SRL $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t" 10471 "POPC $dst, $dst" %} 10472 ins_encode %{ 10473 __ srl($src$$Register, G0, $dst$$Register); 10474 __ popc($dst$$Register, $dst$$Register); 10475 %} 10476 ins_pipe(ialu_reg); 10477 %} 10478 10479 // Note: Long.bitCount(long) returns an int. 
10480 instruct popCountL(iRegIsafe dst, iRegL src) %{ 10481 predicate(UsePopCountInstruction); 10482 match(Set dst (PopCountL src)); 10483 10484 format %{ "POPC $src, $dst" %} 10485 ins_encode %{ 10486 __ popc($src$$Register, $dst$$Register); 10487 %} 10488 ins_pipe(ialu_reg); 10489 %} 10490 10491 10492 // ============================================================================ 10493 //------------Bytes reverse-------------------------------------------------- 10494 10495 instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{ 10496 match(Set dst (ReverseBytesI src)); 10497 10498 // Op cost is artificially doubled to make sure that load or store 10499 // instructions are preferred over this one which requires a spill 10500 // onto a stack slot. 10501 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10502 format %{ "LDUWA $src, $dst\t!asi=primary_little" %} 10503 10504 ins_encode %{ 10505 __ set($src$$disp + STACK_BIAS, O7); 10506 __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10507 %} 10508 ins_pipe( iload_mem ); 10509 %} 10510 10511 instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{ 10512 match(Set dst (ReverseBytesL src)); 10513 10514 // Op cost is artificially doubled to make sure that load or store 10515 // instructions are preferred over this one which requires a spill 10516 // onto a stack slot. 10517 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10518 format %{ "LDXA $src, $dst\t!asi=primary_little" %} 10519 10520 ins_encode %{ 10521 __ set($src$$disp + STACK_BIAS, O7); 10522 __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10523 %} 10524 ins_pipe( iload_mem ); 10525 %} 10526 10527 instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{ 10528 match(Set dst (ReverseBytesUS src)); 10529 10530 // Op cost is artificially doubled to make sure that load or store 10531 // instructions are preferred over this one which requires a spill 10532 // onto a stack slot. 
10533 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10534 format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %} 10535 10536 ins_encode %{ 10537 // the value was spilled as an int so bias the load 10538 __ set($src$$disp + STACK_BIAS + 2, O7); 10539 __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10540 %} 10541 ins_pipe( iload_mem ); 10542 %} 10543 10544 instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{ 10545 match(Set dst (ReverseBytesS src)); 10546 10547 // Op cost is artificially doubled to make sure that load or store 10548 // instructions are preferred over this one which requires a spill 10549 // onto a stack slot. 10550 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST); 10551 format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %} 10552 10553 ins_encode %{ 10554 // the value was spilled as an int so bias the load 10555 __ set($src$$disp + STACK_BIAS + 2, O7); 10556 __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10557 %} 10558 ins_pipe( iload_mem ); 10559 %} 10560 10561 // Load Integer reversed byte order 10562 instruct loadI_reversed(iRegI dst, indIndexMemory src) %{ 10563 match(Set dst (ReverseBytesI (LoadI src))); 10564 10565 ins_cost(DEFAULT_COST + MEMORY_REF_COST); 10566 size(4); 10567 format %{ "LDUWA $src, $dst\t!asi=primary_little" %} 10568 10569 ins_encode %{ 10570 __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10571 %} 10572 ins_pipe(iload_mem); 10573 %} 10574 10575 // Load Long - aligned and reversed 10576 instruct loadL_reversed(iRegL dst, indIndexMemory src) %{ 10577 match(Set dst (ReverseBytesL (LoadL src))); 10578 10579 ins_cost(MEMORY_REF_COST); 10580 size(4); 10581 format %{ "LDXA $src, $dst\t!asi=primary_little" %} 10582 10583 ins_encode %{ 10584 __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register); 10585 %} 10586 ins_pipe(iload_mem); 10587 %} 10588 10589 // Load 
// Load unsigned short / char reversed byte order
instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesUS (LoadUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load short reversed byte order
instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
  match(Set dst (ReverseBytesS (LoadS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Store Integer reversed byte order
instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreI dst (ReverseBytesI src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STWA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store Long reversed byte order
instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
  match(Set dst (StoreL dst (ReverseBytesL src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STXA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store unsigned short/char reversed byte order
instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesUS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// Store short reversed byte order
instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
  match(Set dst (StoreC dst (ReverseBytesS src)));

  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STHA $src, $dst\t!asi=primary_little" %}

  ins_encode %{
    __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
  %}
  ins_pipe(istore_mem_reg);
%}

// ====================VECTOR INSTRUCTIONS=====================================

// Load Aligned Packed values into a Double Register
instruct loadV8(regD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %}
  ins_encode %{
    __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(floadD_mem);
%}

// Store Vector in Double register to memory
instruct storeV8(memory mem, regD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF $src,$mem\t! store vector (8 bytes)" %}
  ins_encode %{
    __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_reg);
%}

// Store Zero into vector in memory
// (a single 8-byte STX of G0 zeroes the whole vector regardless of lane type)
instruct storeV8B_zero(memory mem, immI0 zero) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem (ReplicateB zero)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %}
  ins_encode %{
    __ stx(G0, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_zero);
%}

instruct storeV4S_zero(memory mem, immI0 zero) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem (ReplicateS zero)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %}
  ins_encode %{
    __ stx(G0, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_zero);
%}

instruct storeV2I_zero(memory mem, immI0 zero) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem (ReplicateI zero)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %}
  ins_encode %{
    __ stx(G0, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_zero);
%}

instruct storeV2F_zero(memory mem, immF0 zero) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem (ReplicateF zero)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $zero,$mem\t! store zero vector (2 floats)" %}
  ins_encode %{
    __ stx(G0, $mem$$Address);
  %}
  ins_pipe(fstoreD_mem_zero);
%}

// Replicate scalar to packed byte values into Double register
// (shift/or doubling builds 8 copies of the byte in a long, then moves it
//  to the FP register; requires the VIS3 MOVXTOD instruction)
instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 8 && UseVIS >= 3);
  match(Set dst (ReplicateB src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,56,$tmp\n\t"
            "SRLX $tmp, 8,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,16,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
            "MOVXTOD $tmp,$dst\t! MoveL2D" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 56, Rtmp);
    __ srlx(Rtmp, 8, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 16, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed byte values into Double stack
// (pre-VIS3 fallback: build the replicated long, then store it to the
//  stack slot instead of moving it directly to an FP register)
instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 8 && UseVIS < 3);
  match(Set dst (ReplicateB src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,56,$tmp\n\t"
            "SRLX $tmp, 8,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,16,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
            "STX $tmp,$dst\t! regL to stkD" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 56, Rtmp);
    __ srlx(Rtmp, 8, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 16, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ set ($dst$$disp + STACK_BIAS, Rtmp2);
    __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar constant to packed byte values in Double register
instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed char/short values into Double register
instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 4 && UseVIS >= 3);
  match(Set dst (ReplicateS src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,48,$tmp\n\t"
            "SRLX $tmp,16,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
            "MOVXTOD $tmp,$dst\t! MoveL2D" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 48, Rtmp);
    __ srlx(Rtmp, 16, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed char/short values into Double stack
instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 4 && UseVIS < 3);
  match(Set dst (ReplicateS src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,48,$tmp\n\t"
            "SRLX $tmp,16,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
            "STX $tmp,$dst\t! regL to stkD" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 48, Rtmp);
    __ srlx(Rtmp, 16, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ set ($dst$$disp + STACK_BIAS, Rtmp2);
    __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar constant to packed char/short values in Double register
instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed int values into Double register
instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 2 && UseVIS >= 3);
  match(Set dst (ReplicateI src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,32,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
            "MOVXTOD $tmp,$dst\t! MoveL2D" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 32, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar to packed int values into Double stack
instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
  predicate(n->as_Vector()->length() == 2 && UseVIS < 3);
  match(Set dst (ReplicateI src));
  effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
  format %{ "SLLX $src,32,$tmp\n\t"
            "SRLX $tmp,32,$tmp2\n\t"
            "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
            "STX $tmp,$dst\t! regL to stkD" %}
  ins_encode %{
    Register Rsrc = $src$$Register;
    Register Rtmp = $tmp$$Register;
    Register Rtmp2 = $tmp2$$Register;
    __ sllx(Rsrc, 32, Rtmp);
    __ srlx(Rtmp, 32, Rtmp2);
    __ or3 (Rtmp, Rtmp2, Rtmp);
    __ set ($dst$$disp + STACK_BIAS, Rtmp2);
    __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Replicate scalar constant to packed int values in Double register
// (note: matches any immI constant, not only zero — loaded from the
//  constant table like the other Repl*_imm* rules)
instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

// Replicate scalar to packed float values into Double stack
// (two 4-byte STFs fill the hi and lo halves of the 8-byte stack slot)
instruct Repl2F_stk(stackSlotD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  ins_cost(MEMORY_REF_COST*2);
  format %{ "STF $src,$dst.hi\t! packed2F\n\t"
            "STF $src,$dst.lo" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src));
  ins_pipe(fstoreF_stk_reg);
%}

// Replicate scalar constant to packed float values in Double register
// (note: matches any immF constant, not only zero)
instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF con));
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}

//----------PEEPHOLE RULES-----------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.
//
// peepmatch ( root_instr_name [preceding_instruction]* );
//
// peepconstraint %{
// (instruction_number.operand_name relational_op instruction_number.operand_name
//  [, ...]
//  );
// // instruction numbers are zero-based using left to right order in peepmatch
//
// peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
// // provide an instruction_number.operand_name for each operand that appears
// // in the replacement instruction's match rule
//
// ---------VM FLAGS---------------------------------------------------------
//
// All peephole optimizations can be turned off using -XX:-OptoPeephole
//
// Each peephole rule is given an identifying number starting with zero and
// increasing by one in the order seen by the parser. An individual peephole
// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
// on the command-line.
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only match adjacent instructions in same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == EAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(eRegI dst, eRegI src) %{
//   match(Set dst (CopyI src));
// %}
//
// instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
//   match(Set dst (AddI dst src));
//   effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
//   // increment preceded by register-register move
//   peepmatch ( incI_eReg movI );
//   // require that the destination register of the increment
//   // match the destination register of the move
//   peepconstraint ( 0.dst == 1.dst );
//   // construct a replacement instruction that sets
//   // the destination to ( move's source register + one )
//   peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) );
// %}
//

// // Change load of spilled value to only a spill
// instruct storeI(memory mem, eRegI src) %{
//   match(Set mem (StoreI mem src));
// %}
//
// instruct loadI(eRegI dst, memory mem) %{
//   match(Set dst (LoadI mem));
// %}
//
// peephole %{
//   peepmatch ( loadI storeI );
//   peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
//   peepreplace ( storeI( 1.mem 1.mem 1.src ) );
// %}

//----------SMARTSPILL RULES---------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.
//
// SPARC will probably not have any of these rules due to RISC instruction set.

//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.