1 // 2 // Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. 3 // Copyright 2012, 2014 SAP AG. All rights reserved. 4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 // 6 // This code is free software; you can redistribute it and/or modify it 7 // under the terms of the GNU General Public License version 2 only, as 8 // published by the Free Software Foundation. 9 // 10 // This code is distributed in the hope that it will be useful, but WITHOUT 11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 // version 2 for more details (a copy is included in the LICENSE file that 14 // accompanied this code). 15 // 16 // You should have received a copy of the GNU General Public License version 17 // 2 along with this work; if not, write to the Free Software Foundation, 18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 // 20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 // or visit www.oracle.com if you need additional information or have any 22 // questions. 23 // 24 // 25 26 // 27 // PPC64 Architecture Description File 28 // 29 30 //----------REGISTER DEFINITION BLOCK------------------------------------------ 31 // This information is used by the matcher and the register allocator to 32 // describe individual registers and classes of registers within the target 33 // architecture. 34 register %{ 35 //----------Architecture Description Register Definitions---------------------- 36 // General Registers 37 // "reg_def" name (register save type, C convention save type, 38 // ideal register type, encoding); 39 // 40 // Register Save Types: 41 // 42 // NS = No-Save: The register allocator assumes that these registers 43 // can be used without saving upon entry to the method, & 44 // that they do not need to be saved at call sites. 
45 // 46 // SOC = Save-On-Call: The register allocator assumes that these registers 47 // can be used without saving upon entry to the method, 48 // but that they must be saved at call sites. 49 // These are called "volatiles" on ppc. 50 // 51 // SOE = Save-On-Entry: The register allocator assumes that these registers 52 // must be saved before using them upon entry to the 53 // method, but they do not need to be saved at call 54 // sites. 55 // These are called "nonvolatiles" on ppc. 56 // 57 // AS = Always-Save: The register allocator assumes that these registers 58 // must be saved before using them upon entry to the 59 // method, & that they must be saved at call sites. 60 // 61 // Ideal Register Type is used to determine how to save & restore a 62 // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get 63 // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. 64 // 65 // The encoding number is the actual bit-pattern placed into the opcodes. 66 // 67 // PPC64 register definitions, based on the 64-bit PowerPC ELF ABI 68 // Supplement Version 1.7 as of 2003-10-29. 69 // 70 // For each 64-bit register we must define two registers: the register 71 // itself, e.g. R3, and a corresponding virtual other (32-bit-)'half', 72 // e.g. R3_H, which is needed by the allocator, but is not used 73 // for stores, loads, etc. 74 75 // ---------------------------- 76 // Integer/Long Registers 77 // ---------------------------- 78 79 // PPC64 has 32 64-bit integer registers. 
80 81 // types: v = volatile, nv = non-volatile, s = system 82 reg_def R0 ( SOC, SOC, Op_RegI, 0, R0->as_VMReg() ); // v used in prologs 83 reg_def R0_H ( SOC, SOC, Op_RegI, 99, R0->as_VMReg()->next() ); 84 reg_def R1 ( NS, NS, Op_RegI, 1, R1->as_VMReg() ); // s SP 85 reg_def R1_H ( NS, NS, Op_RegI, 99, R1->as_VMReg()->next() ); 86 reg_def R2 ( SOC, SOC, Op_RegI, 2, R2->as_VMReg() ); // v TOC 87 reg_def R2_H ( SOC, SOC, Op_RegI, 99, R2->as_VMReg()->next() ); 88 reg_def R3 ( SOC, SOC, Op_RegI, 3, R3->as_VMReg() ); // v iarg1 & iret 89 reg_def R3_H ( SOC, SOC, Op_RegI, 99, R3->as_VMReg()->next() ); 90 reg_def R4 ( SOC, SOC, Op_RegI, 4, R4->as_VMReg() ); // iarg2 91 reg_def R4_H ( SOC, SOC, Op_RegI, 99, R4->as_VMReg()->next() ); 92 reg_def R5 ( SOC, SOC, Op_RegI, 5, R5->as_VMReg() ); // v iarg3 93 reg_def R5_H ( SOC, SOC, Op_RegI, 99, R5->as_VMReg()->next() ); 94 reg_def R6 ( SOC, SOC, Op_RegI, 6, R6->as_VMReg() ); // v iarg4 95 reg_def R6_H ( SOC, SOC, Op_RegI, 99, R6->as_VMReg()->next() ); 96 reg_def R7 ( SOC, SOC, Op_RegI, 7, R7->as_VMReg() ); // v iarg5 97 reg_def R7_H ( SOC, SOC, Op_RegI, 99, R7->as_VMReg()->next() ); 98 reg_def R8 ( SOC, SOC, Op_RegI, 8, R8->as_VMReg() ); // v iarg6 99 reg_def R8_H ( SOC, SOC, Op_RegI, 99, R8->as_VMReg()->next() ); 100 reg_def R9 ( SOC, SOC, Op_RegI, 9, R9->as_VMReg() ); // v iarg7 101 reg_def R9_H ( SOC, SOC, Op_RegI, 99, R9->as_VMReg()->next() ); 102 reg_def R10 ( SOC, SOC, Op_RegI, 10, R10->as_VMReg() ); // v iarg8 103 reg_def R10_H( SOC, SOC, Op_RegI, 99, R10->as_VMReg()->next()); 104 reg_def R11 ( SOC, SOC, Op_RegI, 11, R11->as_VMReg() ); // v ENV / scratch 105 reg_def R11_H( SOC, SOC, Op_RegI, 99, R11->as_VMReg()->next()); 106 reg_def R12 ( SOC, SOC, Op_RegI, 12, R12->as_VMReg() ); // v scratch 107 reg_def R12_H( SOC, SOC, Op_RegI, 99, R12->as_VMReg()->next()); 108 reg_def R13 ( NS, NS, Op_RegI, 13, R13->as_VMReg() ); // s system thread id 109 reg_def R13_H( NS, NS, Op_RegI, 99, R13->as_VMReg()->next()); 110 reg_def R14 ( 
SOC, SOE, Op_RegI, 14, R14->as_VMReg() ); // nv 111 reg_def R14_H( SOC, SOE, Op_RegI, 99, R14->as_VMReg()->next()); 112 reg_def R15 ( SOC, SOE, Op_RegI, 15, R15->as_VMReg() ); // nv 113 reg_def R15_H( SOC, SOE, Op_RegI, 99, R15->as_VMReg()->next()); 114 reg_def R16 ( SOC, SOE, Op_RegI, 16, R16->as_VMReg() ); // nv 115 reg_def R16_H( SOC, SOE, Op_RegI, 99, R16->as_VMReg()->next()); 116 reg_def R17 ( SOC, SOE, Op_RegI, 17, R17->as_VMReg() ); // nv 117 reg_def R17_H( SOC, SOE, Op_RegI, 99, R17->as_VMReg()->next()); 118 reg_def R18 ( SOC, SOE, Op_RegI, 18, R18->as_VMReg() ); // nv 119 reg_def R18_H( SOC, SOE, Op_RegI, 99, R18->as_VMReg()->next()); 120 reg_def R19 ( SOC, SOE, Op_RegI, 19, R19->as_VMReg() ); // nv 121 reg_def R19_H( SOC, SOE, Op_RegI, 99, R19->as_VMReg()->next()); 122 reg_def R20 ( SOC, SOE, Op_RegI, 20, R20->as_VMReg() ); // nv 123 reg_def R20_H( SOC, SOE, Op_RegI, 99, R20->as_VMReg()->next()); 124 reg_def R21 ( SOC, SOE, Op_RegI, 21, R21->as_VMReg() ); // nv 125 reg_def R21_H( SOC, SOE, Op_RegI, 99, R21->as_VMReg()->next()); 126 reg_def R22 ( SOC, SOE, Op_RegI, 22, R22->as_VMReg() ); // nv 127 reg_def R22_H( SOC, SOE, Op_RegI, 99, R22->as_VMReg()->next()); 128 reg_def R23 ( SOC, SOE, Op_RegI, 23, R23->as_VMReg() ); // nv 129 reg_def R23_H( SOC, SOE, Op_RegI, 99, R23->as_VMReg()->next()); 130 reg_def R24 ( SOC, SOE, Op_RegI, 24, R24->as_VMReg() ); // nv 131 reg_def R24_H( SOC, SOE, Op_RegI, 99, R24->as_VMReg()->next()); 132 reg_def R25 ( SOC, SOE, Op_RegI, 25, R25->as_VMReg() ); // nv 133 reg_def R25_H( SOC, SOE, Op_RegI, 99, R25->as_VMReg()->next()); 134 reg_def R26 ( SOC, SOE, Op_RegI, 26, R26->as_VMReg() ); // nv 135 reg_def R26_H( SOC, SOE, Op_RegI, 99, R26->as_VMReg()->next()); 136 reg_def R27 ( SOC, SOE, Op_RegI, 27, R27->as_VMReg() ); // nv 137 reg_def R27_H( SOC, SOE, Op_RegI, 99, R27->as_VMReg()->next()); 138 reg_def R28 ( SOC, SOE, Op_RegI, 28, R28->as_VMReg() ); // nv 139 reg_def R28_H( SOC, SOE, Op_RegI, 99, R28->as_VMReg()->next()); 140 
reg_def R29 ( SOC, SOE, Op_RegI, 29, R29->as_VMReg() ); // nv 141 reg_def R29_H( SOC, SOE, Op_RegI, 99, R29->as_VMReg()->next()); 142 reg_def R30 ( SOC, SOE, Op_RegI, 30, R30->as_VMReg() ); // nv 143 reg_def R30_H( SOC, SOE, Op_RegI, 99, R30->as_VMReg()->next()); 144 reg_def R31 ( SOC, SOE, Op_RegI, 31, R31->as_VMReg() ); // nv 145 reg_def R31_H( SOC, SOE, Op_RegI, 99, R31->as_VMReg()->next()); 146 147 148 // ---------------------------- 149 // Float/Double Registers 150 // ---------------------------- 151 152 // Double Registers 153 // The rules of ADL require that double registers be defined in pairs. 154 // Each pair must be two 32-bit values, but not necessarily a pair of 155 // single float registers. In each pair, ADLC-assigned register numbers 156 // must be adjacent, with the lower number even. Finally, when the 157 // CPU stores such a register pair to memory, the word associated with 158 // the lower ADLC-assigned number must be stored to the lower address. 159 160 // PPC64 has 32 64-bit floating-point registers. Each can store a single 161 // or double precision floating-point value. 
162 163 // types: v = volatile, nv = non-volatile, s = system 164 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg() ); // v scratch 165 reg_def F0_H ( SOC, SOC, Op_RegF, 99, F0->as_VMReg()->next() ); 166 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg() ); // v farg1 & fret 167 reg_def F1_H ( SOC, SOC, Op_RegF, 99, F1->as_VMReg()->next() ); 168 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg() ); // v farg2 169 reg_def F2_H ( SOC, SOC, Op_RegF, 99, F2->as_VMReg()->next() ); 170 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg() ); // v farg3 171 reg_def F3_H ( SOC, SOC, Op_RegF, 99, F3->as_VMReg()->next() ); 172 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg() ); // v farg4 173 reg_def F4_H ( SOC, SOC, Op_RegF, 99, F4->as_VMReg()->next() ); 174 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg() ); // v farg5 175 reg_def F5_H ( SOC, SOC, Op_RegF, 99, F5->as_VMReg()->next() ); 176 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg() ); // v farg6 177 reg_def F6_H ( SOC, SOC, Op_RegF, 99, F6->as_VMReg()->next() ); 178 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg() ); // v farg7 179 reg_def F7_H ( SOC, SOC, Op_RegF, 99, F7->as_VMReg()->next() ); 180 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg() ); // v farg8 181 reg_def F8_H ( SOC, SOC, Op_RegF, 99, F8->as_VMReg()->next() ); 182 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg() ); // v farg9 183 reg_def F9_H ( SOC, SOC, Op_RegF, 99, F9->as_VMReg()->next() ); 184 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg() ); // v farg10 185 reg_def F10_H( SOC, SOC, Op_RegF, 99, F10->as_VMReg()->next()); 186 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg() ); // v farg11 187 reg_def F11_H( SOC, SOC, Op_RegF, 99, F11->as_VMReg()->next()); 188 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg() ); // v farg12 189 reg_def F12_H( SOC, SOC, Op_RegF, 99, F12->as_VMReg()->next()); 190 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg() ); // v farg13 191 reg_def F13_H( SOC, SOC, Op_RegF, 99, F13->as_VMReg()->next()); 192 
reg_def F14 ( SOC, SOE, Op_RegF, 14, F14->as_VMReg() ); // nv 193 reg_def F14_H( SOC, SOE, Op_RegF, 99, F14->as_VMReg()->next()); 194 reg_def F15 ( SOC, SOE, Op_RegF, 15, F15->as_VMReg() ); // nv 195 reg_def F15_H( SOC, SOE, Op_RegF, 99, F15->as_VMReg()->next()); 196 reg_def F16 ( SOC, SOE, Op_RegF, 16, F16->as_VMReg() ); // nv 197 reg_def F16_H( SOC, SOE, Op_RegF, 99, F16->as_VMReg()->next()); 198 reg_def F17 ( SOC, SOE, Op_RegF, 17, F17->as_VMReg() ); // nv 199 reg_def F17_H( SOC, SOE, Op_RegF, 99, F17->as_VMReg()->next()); 200 reg_def F18 ( SOC, SOE, Op_RegF, 18, F18->as_VMReg() ); // nv 201 reg_def F18_H( SOC, SOE, Op_RegF, 99, F18->as_VMReg()->next()); 202 reg_def F19 ( SOC, SOE, Op_RegF, 19, F19->as_VMReg() ); // nv 203 reg_def F19_H( SOC, SOE, Op_RegF, 99, F19->as_VMReg()->next()); 204 reg_def F20 ( SOC, SOE, Op_RegF, 20, F20->as_VMReg() ); // nv 205 reg_def F20_H( SOC, SOE, Op_RegF, 99, F20->as_VMReg()->next()); 206 reg_def F21 ( SOC, SOE, Op_RegF, 21, F21->as_VMReg() ); // nv 207 reg_def F21_H( SOC, SOE, Op_RegF, 99, F21->as_VMReg()->next()); 208 reg_def F22 ( SOC, SOE, Op_RegF, 22, F22->as_VMReg() ); // nv 209 reg_def F22_H( SOC, SOE, Op_RegF, 99, F22->as_VMReg()->next()); 210 reg_def F23 ( SOC, SOE, Op_RegF, 23, F23->as_VMReg() ); // nv 211 reg_def F23_H( SOC, SOE, Op_RegF, 99, F23->as_VMReg()->next()); 212 reg_def F24 ( SOC, SOE, Op_RegF, 24, F24->as_VMReg() ); // nv 213 reg_def F24_H( SOC, SOE, Op_RegF, 99, F24->as_VMReg()->next()); 214 reg_def F25 ( SOC, SOE, Op_RegF, 25, F25->as_VMReg() ); // nv 215 reg_def F25_H( SOC, SOE, Op_RegF, 99, F25->as_VMReg()->next()); 216 reg_def F26 ( SOC, SOE, Op_RegF, 26, F26->as_VMReg() ); // nv 217 reg_def F26_H( SOC, SOE, Op_RegF, 99, F26->as_VMReg()->next()); 218 reg_def F27 ( SOC, SOE, Op_RegF, 27, F27->as_VMReg() ); // nv 219 reg_def F27_H( SOC, SOE, Op_RegF, 99, F27->as_VMReg()->next()); 220 reg_def F28 ( SOC, SOE, Op_RegF, 28, F28->as_VMReg() ); // nv 221 reg_def F28_H( SOC, SOE, Op_RegF, 99, 
F28->as_VMReg()->next()); 222 reg_def F29 ( SOC, SOE, Op_RegF, 29, F29->as_VMReg() ); // nv 223 reg_def F29_H( SOC, SOE, Op_RegF, 99, F29->as_VMReg()->next()); 224 reg_def F30 ( SOC, SOE, Op_RegF, 30, F30->as_VMReg() ); // nv 225 reg_def F30_H( SOC, SOE, Op_RegF, 99, F30->as_VMReg()->next()); 226 reg_def F31 ( SOC, SOE, Op_RegF, 31, F31->as_VMReg() ); // nv 227 reg_def F31_H( SOC, SOE, Op_RegF, 99, F31->as_VMReg()->next()); 228 229 // ---------------------------- 230 // Special Registers 231 // ---------------------------- 232 233 // Condition Codes Flag Registers 234 235 // PPC64 has 8 condition code "registers" which are all contained 236 // in the CR register. 237 238 // types: v = volatile, nv = non-volatile, s = system 239 reg_def CCR0(SOC, SOC, Op_RegFlags, 0, CCR0->as_VMReg()); // v 240 reg_def CCR1(SOC, SOC, Op_RegFlags, 1, CCR1->as_VMReg()); // v 241 reg_def CCR2(SOC, SOC, Op_RegFlags, 2, CCR2->as_VMReg()); // nv 242 reg_def CCR3(SOC, SOC, Op_RegFlags, 3, CCR3->as_VMReg()); // nv 243 reg_def CCR4(SOC, SOC, Op_RegFlags, 4, CCR4->as_VMReg()); // nv 244 reg_def CCR5(SOC, SOC, Op_RegFlags, 5, CCR5->as_VMReg()); // v 245 reg_def CCR6(SOC, SOC, Op_RegFlags, 6, CCR6->as_VMReg()); // v 246 reg_def CCR7(SOC, SOC, Op_RegFlags, 7, CCR7->as_VMReg()); // v 247 248 // Special registers of PPC64 249 250 reg_def SR_XER( SOC, SOC, Op_RegP, 0, SR_XER->as_VMReg()); // v 251 reg_def SR_LR( SOC, SOC, Op_RegP, 1, SR_LR->as_VMReg()); // v 252 reg_def SR_CTR( SOC, SOC, Op_RegP, 2, SR_CTR->as_VMReg()); // v 253 reg_def SR_VRSAVE( SOC, SOC, Op_RegP, 3, SR_VRSAVE->as_VMReg()); // v 254 reg_def SR_SPEFSCR(SOC, SOC, Op_RegP, 4, SR_SPEFSCR->as_VMReg()); // v 255 reg_def SR_PPR( SOC, SOC, Op_RegP, 5, SR_PPR->as_VMReg()); // v 256 257 258 // ---------------------------- 259 // Specify priority of register selection within phases of register 260 // allocation. Highest priority is first. 
A useful heuristic is to 261 // give registers a low priority when they are required by machine 262 // instructions, like EAX and EDX on I486, and choose no-save registers 263 // before save-on-call, & save-on-call before save-on-entry. Registers 264 // which participate in fixed calling sequences should come last. 265 // Registers which are used as pairs must fall on an even boundary. 266 267 // It's worth about 1% on SPEC geomean to get this right. 268 269 // Chunk0, chunk1, and chunk2 form the MachRegisterNumbers enumeration 270 // in adGlobals_ppc64.hpp which defines the <register>_num values, e.g. 271 // R3_num. Therefore, R3_num may not be (and in reality is not) 272 // the same as R3->encoding()! Furthermore, we cannot make any 273 // assumptions on ordering, e.g. R3_num may be less than R2_num. 274 // Additionally, the function 275 // static enum RC rc_class(OptoReg::Name reg ) 276 // maps a given <register>_num value to its chunk type (except for flags) 277 // and its current implementation relies on chunk0 and chunk1 having a 278 // size of 64 each. 279 280 // If you change this allocation class, please have a look at the 281 // default values for the parameters RoundRobinIntegerRegIntervalStart 282 // and RoundRobinFloatRegIntervalStart 283 284 alloc_class chunk0 ( 285 // Chunk0 contains *all* 64 integer registers halves. 
286 287 // "non-volatile" registers 288 R14, R14_H, 289 R15, R15_H, 290 R17, R17_H, 291 R18, R18_H, 292 R19, R19_H, 293 R20, R20_H, 294 R21, R21_H, 295 R22, R22_H, 296 R23, R23_H, 297 R24, R24_H, 298 R25, R25_H, 299 R26, R26_H, 300 R27, R27_H, 301 R28, R28_H, 302 R29, R29_H, 303 R30, R30_H, 304 R31, R31_H, 305 306 // scratch/special registers 307 R11, R11_H, 308 R12, R12_H, 309 310 // argument registers 311 R10, R10_H, 312 R9, R9_H, 313 R8, R8_H, 314 R7, R7_H, 315 R6, R6_H, 316 R5, R5_H, 317 R4, R4_H, 318 R3, R3_H, 319 320 // special registers, not available for allocation 321 R16, R16_H, // R16_thread 322 R13, R13_H, // system thread id 323 R2, R2_H, // may be used for TOC 324 R1, R1_H, // SP 325 R0, R0_H // R0 (scratch) 326 ); 327 328 // If you change this allocation class, please have a look at the 329 // default values for the parameters RoundRobinIntegerRegIntervalStart 330 // and RoundRobinFloatRegIntervalStart 331 332 alloc_class chunk1 ( 333 // Chunk1 contains *all* 64 floating-point registers halves. 334 335 // scratch register 336 F0, F0_H, 337 338 // argument registers 339 F13, F13_H, 340 F12, F12_H, 341 F11, F11_H, 342 F10, F10_H, 343 F9, F9_H, 344 F8, F8_H, 345 F7, F7_H, 346 F6, F6_H, 347 F5, F5_H, 348 F4, F4_H, 349 F3, F3_H, 350 F2, F2_H, 351 F1, F1_H, 352 353 // non-volatile registers 354 F14, F14_H, 355 F15, F15_H, 356 F16, F16_H, 357 F17, F17_H, 358 F18, F18_H, 359 F19, F19_H, 360 F20, F20_H, 361 F21, F21_H, 362 F22, F22_H, 363 F23, F23_H, 364 F24, F24_H, 365 F25, F25_H, 366 F26, F26_H, 367 F27, F27_H, 368 F28, F28_H, 369 F29, F29_H, 370 F30, F30_H, 371 F31, F31_H 372 ); 373 374 alloc_class chunk2 ( 375 // Chunk2 contains *all* 8 condition code registers. 376 377 CCR0, 378 CCR1, 379 CCR2, 380 CCR3, 381 CCR4, 382 CCR5, 383 CCR6, 384 CCR7 385 ); 386 387 alloc_class chunk3 ( 388 // special registers 389 // These registers are not allocated, but used for nodes generated by postalloc expand. 
390 SR_XER, 391 SR_LR, 392 SR_CTR, 393 SR_VRSAVE, 394 SR_SPEFSCR, 395 SR_PPR 396 ); 397 398 //-------Architecture Description Register Classes----------------------- 399 400 // Several register classes are automatically defined based upon 401 // information in this architecture description. 402 403 // 1) reg_class inline_cache_reg ( as defined in frame section ) 404 // 2) reg_class compiler_method_oop_reg ( as defined in frame section ) 405 // 2) reg_class interpreter_method_oop_reg ( as defined in frame section ) 406 // 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ ) 407 // 408 409 // ---------------------------- 410 // 32 Bit Register Classes 411 // ---------------------------- 412 413 // We specify registers twice, once as read/write, and once read-only. 414 // We use the read-only registers for source operands. With this, we 415 // can include preset read only registers in this class, as a hard-coded 416 // '0'-register. (We used to simulate this on ppc.) 417 418 // 32 bit registers that can be read and written i.e. these registers 419 // can be dest (or src) of normal instructions. 420 reg_class bits32_reg_rw( 421 /*R0*/ // R0 422 /*R1*/ // SP 423 R2, // TOC 424 R3, 425 R4, 426 R5, 427 R6, 428 R7, 429 R8, 430 R9, 431 R10, 432 R11, 433 R12, 434 /*R13*/ // system thread id 435 R14, 436 R15, 437 /*R16*/ // R16_thread 438 R17, 439 R18, 440 R19, 441 R20, 442 R21, 443 R22, 444 R23, 445 R24, 446 R25, 447 R26, 448 R27, 449 R28, 450 /*R29*/ // global TOC 451 /*R30*/ // Narrow Oop Base 452 R31 453 ); 454 455 // 32 bit registers that can only be read i.e. these registers can 456 // only be src of all instructions. 
reg_class bits32_reg_ro(
  /*R0*/            // R0
  /*R1*/            // SP
  R2,               // TOC  (fixed: separator comma was missing after R2)
  R3,
  R4,
  R5,
  R6,
  R7,
  R8,
  R9,
  R10,
  R11,
  R12,
  /*R13*/           // system thread id
  R14,
  R15,
  /*R16*/           // R16_thread
  R17,
  R18,
  R19,
  R20,
  R21,
  R22,
  R23,
  R24,
  R25,
  R26,
  R27,
  R28,
  /*R29*/
  /*R30*/           // Narrow Oop Base
  R31
);

// Complement-required-in-pipeline operands for narrow oops.
reg_class bits32_reg_ro_not_complement (
  /*R0*/            // R0
  R1,               // SP
  R2,               // TOC
  R3,
  R4,
  R5,
  R6,
  R7,
  R8,
  R9,
  R10,
  R11,
  R12,
  /*R13,*/          // system thread id
  R14,
  R15,
  R16,              // R16_thread
  R17,
  R18,
  R19,
  R20,
  R21,
  R22,
  /*R23,
  R24,
  R25,
  R26,
  R27,
  R28,*/
  /*R29,*/          // TODO: let allocator handle TOC!!
  /*R30,*/
  R31
);

// Complement-required-in-pipeline operands for narrow oops.
// See 64-bit declaration.
reg_class bits32_reg_ro_complement (
  R23,
  R24,
  R25,
  R26,
  R27,
  R28
);

reg_class rscratch1_bits32_reg(R11);
reg_class rscratch2_bits32_reg(R12);
reg_class rarg1_bits32_reg(R3);
reg_class rarg2_bits32_reg(R4);
reg_class rarg3_bits32_reg(R5);
reg_class rarg4_bits32_reg(R6);

// ----------------------------
// 64 Bit Register Classes
// ----------------------------
// 64-bit build means 64-bit pointers means hi/lo pairs

reg_class rscratch1_bits64_reg(R11_H, R11);
reg_class rscratch2_bits64_reg(R12_H, R12);
reg_class rarg1_bits64_reg(R3_H, R3);
reg_class rarg2_bits64_reg(R4_H, R4);
reg_class rarg3_bits64_reg(R5_H, R5);
reg_class rarg4_bits64_reg(R6_H, R6);
// Thread register, 'written' by tlsLoadP, see there.
reg_class thread_bits64_reg(R16_H, R16);

reg_class r19_bits64_reg(R19_H, R19);

// 64 bit registers that can be read and written i.e.
// these registers can be dest (or src) of normal instructions.
reg_class bits64_reg_rw(
  /*R0_H,  R0*/     // R0
  /*R1_H,  R1*/     // SP
    R2_H,  R2,      // TOC
    R3_H,  R3,
    R4_H,  R4,
    R5_H,  R5,
    R6_H,  R6,
    R7_H,  R7,
    R8_H,  R8,
    R9_H,  R9,
    R10_H, R10,
    R11_H, R11,
    R12_H, R12,
  /*R13_H, R13*/    // system thread id
    R14_H, R14,
    R15_H, R15,
  /*R16_H, R16*/    // R16_thread
    R17_H, R17,
    R18_H, R18,
    R19_H, R19,
    R20_H, R20,
    R21_H, R21,
    R22_H, R22,
    R23_H, R23,
    R24_H, R24,
    R25_H, R25,
    R26_H, R26,
    R27_H, R27,
    R28_H, R28,
  /*R29_H, R29*/
  /*R30_H, R30*/
    R31_H, R31
);

// 64 bit registers used excluding r2, r11 and r12
// Used to hold the TOC to avoid collisions with expanded LeafCall which uses
// r2, r11 and r12 internally.
reg_class bits64_reg_leaf_call(
  /*R0_H,  R0*/     // R0
  /*R1_H,  R1*/     // SP
  /*R2_H,  R2*/     // TOC
    R3_H,  R3,
    R4_H,  R4,
    R5_H,  R5,
    R6_H,  R6,
    R7_H,  R7,
    R8_H,  R8,
    R9_H,  R9,
    R10_H, R10,
  /*R11_H, R11*/
  /*R12_H, R12*/
  /*R13_H, R13*/    // system thread id
    R14_H, R14,
    R15_H, R15,
  /*R16_H, R16*/    // R16_thread
    R17_H, R17,
    R18_H, R18,
    R19_H, R19,
    R20_H, R20,
    R21_H, R21,
    R22_H, R22,
    R23_H, R23,
    R24_H, R24,
    R25_H, R25,
    R26_H, R26,
    R27_H, R27,
    R28_H, R28,
  /*R29_H, R29*/
  /*R30_H, R30*/
    R31_H, R31
);

// 64 bit registers used excluding r19.
// Used to hold the TOC to avoid collisions with expanded DynamicCall
// which uses r19 as inline cache internally.
640 reg_class bits64_reg_dynamic_call( 641 /*R0_H, R0*/ // R0 642 /*R1_H, R1*/ // SP 643 R2_H, R2, // TOC 644 R3_H, R3, 645 R4_H, R4, 646 R5_H, R5, 647 R6_H, R6, 648 R7_H, R7, 649 R8_H, R8, 650 R9_H, R9, 651 R10_H, R10, 652 R11_H, R11, 653 R12_H, R12, 654 /*R13_H, R13*/ // system thread id 655 R14_H, R14, 656 R15_H, R15, 657 /*R16_H, R16*/ // R16_thread 658 R17_H, R17, 659 R18_H, R18, 660 /*R19_H, R19*/ 661 R20_H, R20, 662 R21_H, R21, 663 R22_H, R22, 664 R23_H, R23, 665 R24_H, R24, 666 R25_H, R25, 667 R26_H, R26, 668 R27_H, R27, 669 R28_H, R28, 670 /*R29_H, R29*/ 671 /*R30_H, R30*/ 672 R31_H, R31 673 ); 674 675 // 64 bit registers that can only be read i.e. these registers can 676 // only be src of all instructions. 677 reg_class bits64_reg_ro( 678 /*R0_H, R0*/ // R0 679 R1_H, R1, 680 R2_H, R2, // TOC 681 R3_H, R3, 682 R4_H, R4, 683 R5_H, R5, 684 R6_H, R6, 685 R7_H, R7, 686 R8_H, R8, 687 R9_H, R9, 688 R10_H, R10, 689 R11_H, R11, 690 R12_H, R12, 691 /*R13_H, R13*/ // system thread id 692 R14_H, R14, 693 R15_H, R15, 694 R16_H, R16, // R16_thread 695 R17_H, R17, 696 R18_H, R18, 697 R19_H, R19, 698 R20_H, R20, 699 R21_H, R21, 700 R22_H, R22, 701 R23_H, R23, 702 R24_H, R24, 703 R25_H, R25, 704 R26_H, R26, 705 R27_H, R27, 706 R28_H, R28, 707 /*R29_H, R29*/ // TODO: let allocator handle TOC!! 708 /*R30_H, R30,*/ 709 R31_H, R31 710 ); 711 712 // Complement-required-in-pipeline operands. 
713 reg_class bits64_reg_ro_not_complement ( 714 /*R0_H, R0*/ // R0 715 R1_H, R1, // SP 716 R2_H, R2, // TOC 717 R3_H, R3, 718 R4_H, R4, 719 R5_H, R5, 720 R6_H, R6, 721 R7_H, R7, 722 R8_H, R8, 723 R9_H, R9, 724 R10_H, R10, 725 R11_H, R11, 726 R12_H, R12, 727 /*R13_H, R13*/ // system thread id 728 R14_H, R14, 729 R15_H, R15, 730 R16_H, R16, // R16_thread 731 R17_H, R17, 732 R18_H, R18, 733 R19_H, R19, 734 R20_H, R20, 735 R21_H, R21, 736 R22_H, R22, 737 /*R23_H, R23, 738 R24_H, R24, 739 R25_H, R25, 740 R26_H, R26, 741 R27_H, R27, 742 R28_H, R28,*/ 743 /*R29_H, R29*/ // TODO: let allocator handle TOC!! 744 /*R30_H, R30,*/ 745 R31_H, R31 746 ); 747 748 // Complement-required-in-pipeline operands. 749 // This register mask is used for the trap instructions that implement 750 // the null checks on AIX. The trap instruction first computes the 751 // complement of the value it shall trap on. Because of this, the 752 // instruction can not be scheduled in the same cycle as an other 753 // instruction reading the normal value of the same register. So we 754 // force the value to check into 'bits64_reg_ro_not_complement' 755 // and then copy it to 'bits64_reg_ro_complement' for the trap. 756 reg_class bits64_reg_ro_complement ( 757 R23_H, R23, 758 R24_H, R24, 759 R25_H, R25, 760 R26_H, R26, 761 R27_H, R27, 762 R28_H, R28 763 ); 764 765 766 // ---------------------------- 767 // Special Class for Condition Code Flags Register 768 769 reg_class int_flags( 770 /*CCR0*/ // scratch 771 /*CCR1*/ // scratch 772 /*CCR2*/ // nv! 773 /*CCR3*/ // nv! 774 /*CCR4*/ // nv! 
775 CCR5, 776 CCR6, 777 CCR7 778 ); 779 780 reg_class int_flags_CR0(CCR0); 781 reg_class int_flags_CR1(CCR1); 782 reg_class int_flags_CR6(CCR6); 783 reg_class ctr_reg(SR_CTR); 784 785 // ---------------------------- 786 // Float Register Classes 787 // ---------------------------- 788 789 reg_class flt_reg( 790 /*F0*/ // scratch 791 F1, 792 F2, 793 F3, 794 F4, 795 F5, 796 F6, 797 F7, 798 F8, 799 F9, 800 F10, 801 F11, 802 F12, 803 F13, 804 F14, // nv! 805 F15, // nv! 806 F16, // nv! 807 F17, // nv! 808 F18, // nv! 809 F19, // nv! 810 F20, // nv! 811 F21, // nv! 812 F22, // nv! 813 F23, // nv! 814 F24, // nv! 815 F25, // nv! 816 F26, // nv! 817 F27, // nv! 818 F28, // nv! 819 F29, // nv! 820 F30, // nv! 821 F31 // nv! 822 ); 823 824 // Double precision float registers have virtual `high halves' that 825 // are needed by the allocator. 826 reg_class dbl_reg( 827 /*F0, F0_H*/ // scratch 828 F1, F1_H, 829 F2, F2_H, 830 F3, F3_H, 831 F4, F4_H, 832 F5, F5_H, 833 F6, F6_H, 834 F7, F7_H, 835 F8, F8_H, 836 F9, F9_H, 837 F10, F10_H, 838 F11, F11_H, 839 F12, F12_H, 840 F13, F13_H, 841 F14, F14_H, // nv! 842 F15, F15_H, // nv! 843 F16, F16_H, // nv! 844 F17, F17_H, // nv! 845 F18, F18_H, // nv! 846 F19, F19_H, // nv! 847 F20, F20_H, // nv! 848 F21, F21_H, // nv! 849 F22, F22_H, // nv! 850 F23, F23_H, // nv! 851 F24, F24_H, // nv! 852 F25, F25_H, // nv! 853 F26, F26_H, // nv! 854 F27, F27_H, // nv! 855 F28, F28_H, // nv! 856 F29, F29_H, // nv! 857 F30, F30_H, // nv! 858 F31, F31_H // nv! 
859 ); 860 861 %} 862 863 //----------DEFINITION BLOCK--------------------------------------------------- 864 // Define name --> value mappings to inform the ADLC of an integer valued name 865 // Current support includes integer values in the range [0, 0x7FFFFFFF] 866 // Format: 867 // int_def <name> ( <int_value>, <expression>); 868 // Generated Code in ad_<arch>.hpp 869 // #define <name> (<expression>) 870 // // value == <int_value> 871 // Generated code in ad_<arch>.cpp adlc_verification() 872 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>"); 873 // 874 definitions %{ 875 // The default cost (of an ALU instruction). 876 int_def DEFAULT_COST_LOW ( 30, 30); 877 int_def DEFAULT_COST ( 100, 100); 878 int_def HUGE_COST (1000000, 1000000); 879 880 // Memory refs 881 int_def MEMORY_REF_COST_LOW ( 200, DEFAULT_COST * 2); 882 int_def MEMORY_REF_COST ( 300, DEFAULT_COST * 3); 883 884 // Branches are even more expensive. 885 int_def BRANCH_COST ( 900, DEFAULT_COST * 9); 886 int_def CALL_COST ( 1300, DEFAULT_COST * 13); 887 %} 888 889 890 //----------SOURCE BLOCK------------------------------------------------------- 891 // This is a block of C++ code which provides values, functions, and 892 // definitions necessary in the rest of the architecture description. 893 source_hpp %{ 894 // Header information of the source block. 895 // Method declarations/definitions which are used outside 896 // the ad-scope can conveniently be defined here. 897 // 898 // To keep related declarations/definitions/uses close together, 899 // we switch between source %{ }% and source_hpp %{ }% freely as needed. 900 901 // Returns true if Node n is followed by a MemBar node that 902 // will do an acquire. If so, this node must not do the acquire 903 // operation. 904 bool followed_by_acquire(const Node *n); 905 %} 906 907 source %{ 908 909 // Optimize load-acquire. 
//
// Check if acquire is unnecessary due to following operation that does
// acquire anyways.
// Walk the pattern:
//
//      n: Load.acq
//           |
//      MemBarAcquire
//       |         |
//  Proj(ctrl)  Proj(mem)
//       |         |
//   MemBarRelease/Volatile
//
bool followed_by_acquire(const Node *load) {
  assert(load->is_Load(), "So far implemented only for loads.");

  // Find MemBarAcquire.
  const Node *mba = NULL;
  for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
    const Node *out = load->fast_out(i);
    if (out->Opcode() == Op_MemBarAcquire) {
      if (out->in(0) == load) continue; // Skip control edge, membar should be found via precedence edge.
      mba = out;
      break;
    }
  }
  if (!mba) return false;

  // Find following MemBar node.
  //
  // The following node must be reachable by control AND memory
  // edge to assure no other operations are in between the two nodes.
  //
  // So first get the Proj node, mem_proj, to use it to iterate forward.
  Node *mem_proj = NULL;
  for (DUIterator_Fast imax, i = mba->fast_outs(imax); i < imax; i++) {
    mem_proj = mba->fast_out(i); // Throw out-of-bounds if proj not found
    assert(mem_proj->is_Proj(), "only projections here");
    ProjNode *proj = mem_proj->as_Proj();
    if (proj->_con == TypeFunc::Memory &&
        !Compile::current()->node_arena()->contains(mem_proj)) // Unmatched old-space only
      break;
  }
  assert(mem_proj->as_Proj()->_con == TypeFunc::Memory, "Graph broken");

  // Search MemBar behind Proj. If there are other memory operations
  // behind the Proj we lose (the acquire must stay).
  for (DUIterator_Fast jmax, j = mem_proj->fast_outs(jmax); j < jmax; j++) {
    Node *x = mem_proj->fast_out(j);
    // Proj might have an edge to a store or load node which precedes the membar.
    if (x->is_Mem()) return false;

    // On PPC64 release and volatile are implemented by an instruction
    // that also has acquire semantics. I.e. there is no need for an
    // acquire before these.
    int xop = x->Opcode();
    if (xop == Op_MemBarRelease || xop == Op_MemBarVolatile) {
      // Make sure we're not missing Call/Phi/MergeMem by checking
      // control edges. The control edge must directly lead back
      // to the MemBarAcquire.
      Node *ctrl_proj = x->in(0);
      if (ctrl_proj->is_Proj() && ctrl_proj->in(0) == mba) {
        return true;
      }
    }
  }

  return false;
}

#define __ _masm.

// Tertiary op of a LoadP or StoreP encoding.
#define REGP_OP true

// Map allocator register encodings to assembler register objects
// (definitions appear later in this file).
static ConditionRegister reg_to_ConditionRegister_object(int register_encoding);
static FloatRegister reg_to_FloatRegister_object(int register_encoding);
static Register reg_to_register_object(int register_encoding);

// ****************************************************************************

// REQUIRED FUNCTIONALITY

// !!!!! Special hack to get all type of calls to specify the byte offset
//       from the start of the call to the point where the return address
//       will point.

// PPC port: Removed use of lazy constant construct.

// Return-address offset of a static Java call site.
int MachCallStaticJavaNode::ret_addr_offset() {
  // It's only a single branch-and-link instruction.
  return 4;
}

// Return-address offset of a dynamic (virtual) Java call site; depends on
// whether the call was postalloc-expanded (inline caches) or goes through
// the vtable.
int MachCallDynamicJavaNode::ret_addr_offset() {
  // Offset is 4 with postalloc expanded calls (bl is one instruction). We use
  // postalloc expanded calls if we use inline caches and do not update method data.
  if (UseInlineCaches)
    return 4;

  int vtable_index = this->_vtable_index;
  if (vtable_index < 0) {
    // Must be invalid_vtable_index, not nonvirtual_vtable_index.
    assert(vtable_index == methodOopDesc::invalid_vtable_index, "correct sentinel value");
    return 12;
  } else {
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    return 24;
  }
}

// Return-address offset of a runtime call; the ELFv2 ABI needs a shorter
// call sequence than the ELFv1 ABI (no function descriptor handling).
int MachCallRuntimeNode::ret_addr_offset() {
#if defined(ABI_ELFv2)
  return 28;
#else
  return 40;
#endif
}

//=============================================================================

// condition code conversions

// Map a condition code to the BO (branch option) field of a PPC branch;
// bit 3 of cc selects the "CR bit set / not set" sense.
static int cc_to_boint(int cc) {
  return Assembler::bcondCRbiIs0 | (cc & 8);
}

// Same as cc_to_boint, but with the branch sense inverted.
static int cc_to_inverse_boint(int cc) {
  return Assembler::bcondCRbiIs0 | (8-(cc & 8));
}

// Build the BI field: condition register number plus bit index (cc & 3).
static int cc_to_biint(int cc, int flags_reg) {
  return (flags_reg << 2) | (cc & 3);
}

//=============================================================================

// Compute padding required for nodes which need alignment. The padding
// is the number of bytes (not instructions) which will be inserted before
// the instruction. The padding must match the size of a NOP instruction.

int string_indexOf_imm1_charNode::compute_padding(int current_offset) const {
  return (3*4-current_offset)&31;
}

int string_indexOf_imm1Node::compute_padding(int current_offset) const {
  return (2*4-current_offset)&31;
}

int string_indexOf_immNode::compute_padding(int current_offset) const {
  return (3*4-current_offset)&31;
}

int string_indexOfNode::compute_padding(int current_offset) const {
  return (1*4-current_offset)&31;
}

int string_compareNode::compute_padding(int current_offset) const {
  return (4*4-current_offset)&31;
}

int string_equals_immNode::compute_padding(int current_offset) const {
  if (opnd_array(3)->constant() < 16) return 0; // Don't insert nops for short version (loop completely unrolled).
  return (2*4-current_offset)&31;
}

int string_equalsNode::compute_padding(int current_offset) const {
  return (7*4-current_offset)&31;
}

int inlineCallClearArrayNode::compute_padding(int current_offset) const {
  return (2*4-current_offset)&31;
}

//=============================================================================

// Indicate if the safepoint node needs the polling page as an input.
bool SafePointNode::needs_polling_address_input() {
  // The address is loaded from thread by a separate node.
  return true;
}

//=============================================================================

// Emit an interrupt that is caught by the debugger (for debugging compiler).
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ illtrap();
}

#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

//=============================================================================

void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}

// Write one raw 32-bit instruction word into the code buffer and advance
// the insts_end pointer.
static inline void emit_long(CodeBuffer &cbuf, int value) {
  *((int*)(cbuf.insts_end())) = value;
  cbuf.set_insts_end(cbuf.insts_end() + BytesPerInstWord);
}

//=============================================================================

// Emit a java_to_interp stub.
//
// The stub is initially created with a -1 target and a NULL oop, the stub
// is fixed up when the corresponding call is converted from calling compiled
// code to calling interpreted code.

// Offset from start of compiled java to interpreter stub to the load
// constant that loads the inline cache (IC) (8 if we have to load
// toc).
const int CompiledStaticCall::comp_to_int_load_offset = 8;

// Worst-case size of the stub emitted below; checked by assert after emission.
const uint java_to_interp_stub_size = 12 * BytesPerInstWord;

// Emit the java_to_interp stub into the stub section.
// insts_relocation_offset: offset of the associated call instruction within
// the instructions code-section (anchors the static_stub relocation).
void emit_java_to_interp_stub(MacroAssembler &_masm, const int insts_relocation_offset) {
  // Start the stub.
  address stub = __ start_a_stub(java_to_interp_stub_size);
  if (stub == NULL) {
    Compile::current()->env()->record_out_of_memory_failure();
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  __ relocate(static_stub_Relocation::spec(__ code()->insts()->start() + insts_relocation_offset));
  const int stub_start_offset = __ offset();

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral oop = __ allocate_oop_address(NULL);
  __ load_const_from_method_toc(reg_to_register_object(Matcher::inline_cache_reg_encode()), oop, reg_scratch);

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    __ load_const_from_method_toc(reg_scratch, a, reg_scratch);
    __ mtctr(reg_scratch);
    __ bctr();
  }

  // FIXME: Assert that the stub can be identified and patched.

  // Java_to_interp_stub_size should be good.
  assert((uint)(__ offset() - stub_start_offset) <= java_to_interp_stub_size, "should be good size");
  assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
         "must not confuse java_to_interp with trampoline stubs");

  // End the stub.
  __ end_a_stub();
}

// Size of java_to_interp stub, this doesn't need to be accurate but it must
// be larger or equal to the real size of the stub.
// Used for optimization in Compile::Shorten_branches.
uint size_java_to_interp() {
  return java_to_interp_stub_size;
}

// Number of relocation entries needed by compiled java to interpreter
// call stub.
// Used for optimization in Compile::Shorten_branches.
uint reloc_java_to_interp() {
  return 5;
}

//=============================================================================

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call-site in the stub section:
//   load the call target from the constant pool
//   branch via CTR (LR/link still points to the call-site above)

const uint trampoline_stub_size = 6 * BytesPerInstWord;

// destination_toc_offset: TOC offset holding the real call target.
// insts_call_instruction_offset: offset of the related bl in the
// instructions code-section (anchors the trampoline_stub relocation).
void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
  // Start the stub.
  address stub = __ start_a_stub(Compile::MAX_stubs_size/2);
  if (stub == NULL) {
    Compile::current()->env()->record_out_of_memory_failure();
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  Register reg_scratch = R12_scratch2;

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  __ relocate(trampoline_stub_Relocation::spec(__ code()->insts()->start() + insts_call_instruction_offset));
  const int stub_start_offset = __ offset();

  // Now, create the trampoline stub's code:
  // - load the TOC
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  __ ld_largeoffset_unchecked(reg_scratch, destination_toc_offset, reg_scratch, false);
  __ mtctr(reg_scratch);
  __ bctr();

  const address stub_start_addr = __ addr_at(stub_start_offset);

  // FIXME: Assert that the trampoline stub can be identified and patched.

  // Assert that the encoded destination_toc_offset can be identified and that it is correct.
  assert(destination_toc_offset == NativeCallTrampolineStub_at(stub_start_addr)->destination_toc_offset(),
         "encoded offset into the constant pool must match");
  // Trampoline_stub_size should be good.
  assert((uint)(__ offset() - stub_start_offset) <= trampoline_stub_size, "should be good size");
  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  // End the stub.
  __ end_a_stub();
}

// Size of trampoline stub, this doesn't need to be accurate but it must
// be larger or equal to the real size of the stub.
// Used for optimization in Compile::Shorten_branches.
uint size_call_trampoline() {
  return trampoline_stub_size;
}

// Number of relocation entries needed by trampoline stub.
// Used for optimization in Compile::Shorten_branches.
uint reloc_call_trampoline() {
  return 5;
}

//=============================================================================

// Emit an inline branch-and-link call and a related trampoline stub.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call-site in the stub section:
//   load the call target from the constant pool
//   branch via CTR (LR/link still points to the call-site above)
//

typedef struct {
  int insts_call_instruction_offset;
  int ret_addr_offset;
} EmitCallOffsets;

// Emit a branch-and-link instruction that branches to a trampoline.
// - Remember the offset of the branch-and-link instruction.
// - Add a relocation at the branch-and-link instruction.
// - Emit a branch-and-link.
// - Remember the return pc offset.
EmitCallOffsets emit_call_with_trampoline_stub(MacroAssembler &_masm, address entry_point, relocInfo::relocType rtype) {
  EmitCallOffsets offsets = { -1, -1 };
  const int start_offset = __ offset();
  offsets.insts_call_instruction_offset = __ offset();

  // No entry point given, use the current pc.
  if (entry_point == NULL) entry_point = __ pc();

  if (!Compile::current()->in_scratch_emit_size()) {
    // Put the entry point as a constant into the constant pool.
    const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
    const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

    // Emit the trampoline stub which will be related to the branch-and-link below.
    emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
    __ relocate(rtype);
  }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl((address) __ pc());

  offsets.ret_addr_offset = __ offset() - start_offset;

  return offsets;
}

//=============================================================================

// Factory for creating loadConL* nodes for large/small constant pool.

// Replicate the float's 32-bit pattern into both halves of a 64-bit long.
static inline jlong replicate_immF(float con) {
  // Replicate float con 2 times and pack into vector.
  // NOTE(review): type-puns via pointer cast (strict-aliasing hazard in
  // standard C++); relies on the VM's build flags tolerating this.
  int val = *((int*)&con);
  jlong lval = val;
  lval = (lval << 32) | (lval & 0xFFFFFFFFl);
  return lval;
}

typedef struct {
  loadConL_hiNode *_large_hi;
  loadConL_loNode *_large_lo;
  loadConLNode    *_small;
  MachNode        *_last;
} loadConLNodesTuple;

// Build the mach nodes that materialize a long constant from the TOC:
// either a hi/lo node pair (large constant pool) or a single loadConL
// node. _last is the node producing the final value.
loadConLNodesTuple loadConLNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
                                             bool isoop, OptoReg::Name reg_second, OptoReg::Name reg_first) {
  loadConLNodesTuple nodes;
  MachOper *immIsOop = new (C) immIOper(isoop ? 1 : 0);

  const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
  if (large_constant_pool) {
    // Create new nodes.
    loadConL_hiNode *m1 = new (C) loadConL_hiNode();
    loadConL_loNode *m2 = new (C) loadConL_loNode();

    // inputs for new nodes
    m1->add_req(NULL, toc);
    m2->add_req(NULL, m1);

    // operands for new nodes
    m1->_opnds[0] = new (C) iRegLdstOper(); // dst
    m1->_opnds[1] = immSrc;                 // src
    m1->_opnds[2] = new (C) iRegPdstOper(); // toc
    m1->_opnds[3] = immIsOop;               // isoop
    m2->_opnds[0] = new (C) iRegLdstOper(); // dst
    m2->_opnds[1] = immSrc;                 // src
    m2->_opnds[2] = new (C) iRegLdstOper(); // base
    m2->_opnds[3] = immIsOop;               // isoop

    // Initialize ins_attrib TOC fields.
    m1->_const_toc_offset = -1;
    m2->_const_toc_offset_hi_node = m1;

    // Initialize ins_attrib instruction offset.
    m1->_cbuf_insts_offset = -1;

    // register allocation for new nodes
    ra_->set_pair(m1->_idx, reg_second, reg_first);
    ra_->set_pair(m2->_idx, reg_second, reg_first);

    // Create result.
    nodes._large_hi = m1;
    nodes._large_lo = m2;
    nodes._small = NULL;
    nodes._last = nodes._large_lo;
    assert(m2->bottom_type()->isa_long(), "must be long");
  } else {
    loadConLNode *m2 = new (C) loadConLNode();

    // inputs for new nodes
    m2->add_req(NULL, toc);

    // operands for new nodes
    m2->_opnds[0] = new (C) iRegLdstOper(); // dst
    m2->_opnds[1] = immSrc;                 // src
    m2->_opnds[2] = new (C) iRegPdstOper(); // toc
    m2->_opnds[3] = immIsOop;               // isoop

    // Initialize ins_attrib instruction offset.
    m2->_cbuf_insts_offset = -1;

    // register allocation for new nodes
    ra_->set_pair(m2->_idx, reg_second, reg_first);

    // Create result.
    nodes._large_hi = NULL;
    nodes._large_lo = NULL;
    nodes._small = m2;
    nodes._last = nodes._small;
    assert(m2->bottom_type()->isa_long(), "must be long");
  }

  return nodes;
}

//=============================================================================

// Factory for creating loadConP* nodes for large/small constant pool.

typedef struct {
  loadConP_hiNode *_large_hi;
  loadConP_loNode *_large_lo;
  loadConPNode    *_small;
  MachNode        *_last;
} loadConPNodesTuple;

// Pointer-constant analogue of loadConLNodesTuple_create() above; the
// isoop flag is derived from the immediate operand itself.
loadConPNodesTuple loadConPNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immPOper *immSrc,
                                             OptoReg::Name reg_second, OptoReg::Name reg_first) {
  loadConPNodesTuple nodes;
  const bool isoop = immSrc->constant_is_oop();
  MachOper *immIsOop = new (C) immIOper(isoop ? 1 : 0);

  const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
  if (large_constant_pool) {
    // Create new nodes.
    loadConP_hiNode *m1 = new (C) loadConP_hiNode();
    loadConP_loNode *m2 = new (C) loadConP_loNode();

    // inputs for new nodes
    m1->add_req(NULL, toc);
    m2->add_req(NULL, m1);

    // operands for new nodes
    m1->_opnds[0] = new (C) iRegPdstOper(); // dst
    m1->_opnds[1] = immSrc;                 // src
    m1->_opnds[2] = new (C) iRegPdstOper(); // toc
    m1->_opnds[3] = immIsOop;               // isoop
    m2->_opnds[0] = new (C) iRegPdstOper(); // dst
    m2->_opnds[1] = immSrc;                 // src
    m2->_opnds[2] = new (C) iRegLdstOper(); // base
    m2->_opnds[3] = immIsOop;               // isoop

    // Initialize ins_attrib TOC fields.
    m1->_const_toc_offset = -1;
    m2->_const_toc_offset_hi_node = m1;

    // register allocation for new nodes
    ra_->set_pair(m1->_idx, reg_second, reg_first);
    ra_->set_pair(m2->_idx, reg_second, reg_first);

    // Create result.
    nodes._large_hi = m1;
    nodes._large_lo = m2;
    nodes._small = NULL;
    nodes._last = nodes._large_lo;
    assert(m2->bottom_type()->isa_ptr(), "must be ptr");
  } else {
    loadConPNode *m2 = new (C) loadConPNode();

    // inputs for new nodes
    m2->add_req(NULL, toc);

    // operands for new nodes
    m2->_opnds[0] = new (C) iRegPdstOper(); // dst
    m2->_opnds[1] = immSrc;                 // src
    m2->_opnds[2] = new (C) iRegPdstOper(); // toc
    m2->_opnds[3] = immIsOop;               // isoop

    // register allocation for new nodes
    ra_->set_pair(m2->_idx, reg_second, reg_first);

    nodes._large_hi = NULL;
    nodes._large_lo = NULL;
    nodes._small = m2;
    nodes._last = nodes._small;
    assert(m2->bottom_type()->isa_ptr(), "must be ptr");
  }

  return nodes;
}

//=============================================================================

const RegMask& MachConstantBaseNode::_out_RegMask = BITS64_REG_RW_mask();
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_late_expand() const { return true; }
// Late-expand the constant-table base into a loadToc hi/lo node pair,
// both allocated to this node's register.
void MachConstantBaseNode::lateExpand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  Compile *C = ra_->C;

  iRegPdstOper *op_dst = new (C) iRegPdstOper();
  MachNode *m1 = new (C) loadToc_hiNode();
  MachNode *m2 = new (C) loadToc_loNode();

  m1->add_req(NULL);
  m2->add_req(NULL, m1);
  m1->_opnds[0] = op_dst;
  m2->_opnds[0] = op_dst;
  m2->_opnds[1] = op_dst;
  ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
  ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
  nodes->push(m1);
  nodes->push(m2);
}

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Is late expanded.
1520 ShouldNotReachHere(); 1521 } 1522 1523 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const { 1524 return 0; 1525 } 1526 1527 #ifndef PRODUCT 1528 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 1529 st->print("-- \t// MachConstantBaseNode (empty encoding)"); 1530 } 1531 #endif 1532 1533 //============================================================================= 1534 1535 #ifndef PRODUCT 1536 void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const { 1537 Compile* C = ra_->C; 1538 const long framesize = C->frame_slots() << LogBytesPerInt; 1539 1540 st->print("PROLOG\n\t"); 1541 if (C->need_stack_bang(framesize)) { 1542 st->print("stack_overflow_check\n\t"); 1543 } 1544 1545 if (!false /* TODO: PPC port C->is_frameless_method()*/) { 1546 st->print("save return pc\n\t"); 1547 st->print("push frame %d\n\t", -framesize); 1548 } 1549 } 1550 #endif 1551 1552 // Macro used instead of the common __ to emulate the pipes of PPC. 1553 // Instead of e.g. __ ld(...) one hase to write ___(ld) ld(...) This enables the 1554 // micro scheduler to cope with "hand written" assembler like in the prolog. Though 1555 // still no scheduling of this code is possible, the micro scheduler is aware of the 1556 // code and can update its internal data. The following mechanism is used to achieve this: 1557 // The micro scheduler calls size() of each compound node during scheduling. size() does a 1558 // dummy emit and only during this dummy emit C->hb_scheduling() is not NULL. 1559 #if 0 // TODO: PPC port 1560 #define ___(op) if (UsePower6SchedulerPPC64 && C->hb_scheduling()) \ 1561 C->hb_scheduling()->_pdScheduling->PdEmulatePipe(ppc64Opcode_##op); \ 1562 _masm. 
1563 #define ___stop if (UsePower6SchedulerPPC64 && C->hb_scheduling()) \ 1564 C->hb_scheduling()->_pdScheduling->PdEmulatePipe(archOpcode_none) 1565 #define ___advance if (UsePower6SchedulerPPC64 && C->hb_scheduling()) \ 1566 C->hb_scheduling()->_pdScheduling->advance_offset 1567 #else 1568 #define ___(op) if (UsePower6SchedulerPPC64) \ 1569 Unimplemented(); \ 1570 _masm. 1571 #define ___stop if (UsePower6SchedulerPPC64) \ 1572 Unimplemented() 1573 #define ___advance if (UsePower6SchedulerPPC64) \ 1574 Unimplemented() 1575 #endif 1576 1577 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1578 Compile* C = ra_->C; 1579 MacroAssembler _masm(&cbuf); 1580 1581 const long framesize = ((long)C->frame_slots()) << LogBytesPerInt; 1582 assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment"); 1583 1584 const bool method_is_frameless = false /* TODO: PPC port C->is_frameless_method()*/; 1585 1586 const Register return_pc = R20; // Must match return_addr() in frame section. 1587 const Register callers_sp = R21; 1588 const Register push_frame_temp = R22; 1589 const Register toc_temp = R23; 1590 assert_different_registers(R11, return_pc, callers_sp, push_frame_temp, toc_temp); 1591 1592 if (method_is_frameless) { 1593 // Add nop at beginning of all frameless methods to prevent any 1594 // oop instructions from getting overwritten by make_not_entrant 1595 // (patching attempt would fail). 1596 ___(nop) nop(); 1597 } else { 1598 // Get return pc. 1599 ___(mflr) mflr(return_pc); 1600 } 1601 1602 // Calls to C2R adapters often do not accept exceptional returns. 1603 // We require that their callers must bang for them. But be 1604 // careful, because some VM calls (such as call site linkage) can 1605 // use several kilobytes of stack. But the stack safety zone should 1606 // account for that. See bugs 4446381, 4468289, 4497237. 
1607 if (C->need_stack_bang(framesize) && UseStackBanging) { 1608 // Unfortunately we cannot use the function provided in 1609 // assembler.cpp as we have to emulate the pipes. So I had to 1610 // insert the code of generate_stack_overflow_check(), see 1611 // assembler.cpp for some illuminative comments. 1612 const int page_size = os::vm_page_size(); 1613 int bang_end = StackShadowPages*page_size; 1614 1615 // This is how far the previous frame's stack banging extended. 1616 const int bang_end_safe = bang_end; 1617 1618 if (framesize > page_size) { 1619 bang_end += framesize; 1620 } 1621 1622 int bang_offset = bang_end_safe; 1623 1624 while (bang_offset <= bang_end) { 1625 // Need at least one stack bang at end of shadow zone. 1626 1627 // Again I had to copy code, this time from assembler_ppc64.cpp, 1628 // bang_stack_with_offset - see there for comments. 1629 1630 // Stack grows down, caller passes positive offset. 1631 assert(bang_offset > 0, "must bang with positive offset"); 1632 1633 long stdoffset = -bang_offset; 1634 1635 if (Assembler::is_simm(stdoffset, 16)) { 1636 // Signed 16 bit offset, a simple std is ok. 1637 if (UseLoadInstructionsForStackBangingPPC64) { 1638 ___(ld) ld(R0, (int)(signed short)stdoffset, R1_SP); 1639 } else { 1640 ___(std) std(R0, (int)(signed short)stdoffset, R1_SP); 1641 } 1642 } else if (Assembler::is_simm(stdoffset, 31)) { 1643 // Use largeoffset calculations for addis & ld/std. 
1644 const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset); 1645 const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset); 1646 1647 Register tmp = R11; 1648 ___(addis) addis(tmp, R1_SP, hi); 1649 if (UseLoadInstructionsForStackBangingPPC64) { 1650 ___(ld) ld(R0, lo, tmp); 1651 } else { 1652 ___(std) std(R0, lo, tmp); 1653 } 1654 } else { 1655 ShouldNotReachHere(); 1656 } 1657 1658 bang_offset += page_size; 1659 } 1660 // R11 trashed 1661 } // C->need_stack_bang(framesize) && UseStackBanging 1662 1663 unsigned int bytes = (unsigned int)framesize; 1664 long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes); 1665 ciMethod *currMethod = C -> method(); 1666 1667 // Optimized version for most common case. 1668 if (UsePower6SchedulerPPC64 && 1669 !method_is_frameless && Assembler::is_simm((int)(-(_abi(lr) + offset)), 16) && 1670 !(false /* ConstantsALot TODO: PPC port*/)) { 1671 ___(or) mr(callers_sp, R1_SP); 1672 ___(addi) addi(R1_SP, R1_SP, -offset); 1673 ___stop; // Emulator won't recognize dependency. 1674 ___(std) std(return_pc, _abi(lr) + offset, R1_SP); 1675 ___(std) std(callers_sp, 0, R1_SP); 1676 return; 1677 } 1678 1679 if (!method_is_frameless) { 1680 // Get callers sp. 1681 ___(or) mr(callers_sp, R1_SP); 1682 1683 // Push method's frame, modifies SP. 1684 assert(Assembler::is_uimm(framesize, 32U), "wrong type"); 1685 // The ABI is already accounted for in 'framesize' via the 1686 // 'out_preserve' area. 1687 Register tmp = push_frame_temp; 1688 // Had to insert code of push_frame((unsigned int)framesize, push_frame_temp). 1689 if (Assembler::is_simm(-offset, 16)) { 1690 ___(stdu) stdu(R1_SP, -offset, R1_SP); 1691 } else { 1692 long x = -offset; 1693 // Had to insert load_const(tmp, -offset). 
1694 ___(addis) lis( tmp, (int)((signed short)(((x >> 32) & 0xffff0000) >> 16))); 1695 ___(ori) ori( tmp, tmp, ((x >> 32) & 0x0000ffff)); 1696 ___(rldicr) sldi(tmp, tmp, 32); 1697 ___(oris) oris(tmp, tmp, (x & 0xffff0000) >> 16); 1698 ___(ori) ori( tmp, tmp, (x & 0x0000ffff)); 1699 1700 ___(stdux) stdux(R1_SP, R1_SP, tmp); 1701 } 1702 } 1703 #if 0 // TODO: PPC port 1704 // For testing large constant pools, emit a lot of constants to constant pool. 1705 // "Randomize" const_size. 1706 if (ConstantsALot) { 1707 const int num_consts = const_size(); 1708 for (int i = 0; i < num_consts; i++) { 1709 __ long_constant(0xB0B5B00BBABE); 1710 } 1711 } 1712 #endif 1713 if (!method_is_frameless) { 1714 // Save return pc. 1715 ___(std) std(return_pc, _abi(lr), callers_sp); 1716 } 1717 } 1718 #undef ___ 1719 #undef ___stop 1720 1721 uint MachPrologNode::size(PhaseRegAlloc *ra_) const { 1722 // Variable size. determine dynamically. 1723 return MachNode::size(ra_); 1724 } 1725 1726 int MachPrologNode::reloc() const { 1727 // Return number of relocatable values contained in this instruction. 1728 return 1; // 1 reloc entry for load_const(toc). 
1729 } 1730 1731 //============================================================================= 1732 1733 #ifndef PRODUCT 1734 void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const { 1735 Compile* C = ra_->C; 1736 1737 st->print("EPILOG\n\t"); 1738 st->print("restore return pc\n\t"); 1739 st->print("pop frame\n\t"); 1740 1741 if (do_polling() && C->is_method_compilation()) { 1742 st->print("touch polling page\n\t"); 1743 } 1744 } 1745 #endif 1746 1747 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 1748 Compile* C = ra_->C; 1749 MacroAssembler _masm(&cbuf); 1750 1751 const long framesize = ((long)C->frame_slots()) << LogBytesPerInt; 1752 assert(framesize >= 0, "negative frame-size?"); 1753 1754 const bool method_needs_polling = do_polling() && C->is_method_compilation(); 1755 const bool method_is_frameless = false /* TODO: PPC port C->is_frameless_method()*/; 1756 const Register return_pc = R11; 1757 const Register polling_page = R12; 1758 1759 if (!method_is_frameless) { 1760 // Restore return pc relative to callers' sp. 1761 __ ld(return_pc, ((int)framesize) + _abi(lr), R1_SP); 1762 } 1763 1764 if (method_needs_polling) { 1765 if (LoadPollAddressFromThread) { 1766 // TODO: PPC port __ ld(polling_page, in_bytes(JavaThread::poll_address_offset()), R16_thread); 1767 Unimplemented(); 1768 } else { 1769 __ load_const_optimized(polling_page, (long)(address) os::get_polling_page()); // TODO: PPC port: get_standard_polling_page() 1770 } 1771 } 1772 1773 if (!method_is_frameless) { 1774 // Move return pc to LR. 1775 __ mtlr(return_pc); 1776 // Pop frame (fixed frame-size). 1777 __ addi(R1_SP, R1_SP, (int)framesize); 1778 } 1779 1780 if (method_needs_polling) { 1781 // We need to mark the code position where the load from the safepoint 1782 // polling page was emitted as relocInfo::poll_return_type here. 
    __ relocate(relocInfo::poll_return_type);
    __ load_from_polling_page(polling_page);
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for load_from_polling_page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// This method seems to be obsolete. It is declared in machnode.hpp
// and defined in all *.ad files, but it is never called. Should we
// get rid of it?
int MachEpilogNode::safepoint_offset() const {
  assert(do_polling(), "no return for this epilog node");
  return 0;
}

//=============================================================================

#if 0 // TODO: PPC port
// SIGTRAP-based null checks (on AIX).
#ifdef AIX
void MachNullCheckNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // Emits entries in the null-pointer exception handler table ...
  assert(ImplicitNullChecks, "precondition");
  assert(MachNullCheckNode::uses_checked_address_as_required_input(), "precondition");

  if (!memory_op_does_trap_hint()) {
    // ... and a null-check trap if the memory op is not guaranteed to be a store.
    MacroAssembler _masm(&cbuf);
    // Trap-instruction-based null checks.
    int reg = ra_->get_reg_first(in(MachNullCheckNode::checked_address_in_index()));
    _masm.trap_null_check(as_Register(Matcher::_regEncode[reg]));
  }
}

uint MachNullCheckNode::size(PhaseRegAlloc *ra_) const {
  if (!memory_op_does_trap_hint()) {
    // We emit a trap instruction, 4 bytes.
    return 4;
  } else {
    // The store will handle the implicit null check, nothing here.
    return 0;
  }
}
#endif

// Load the polling page address either from the thread structure or emit a
// nop as a placeholder (choice controlled by LoadPollAddressFromThread).
void MachLoadPollAddrLateNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  if (LoadPollAddressFromThread) {
    _masm.ld(R11, in_bytes(JavaThread::poll_address_offset()), R16_thread);
  } else {
    _masm.nop();
  }
}

uint MachLoadPollAddrLateNode::size(PhaseRegAlloc* ra_) const {
  // NOTE(review): both branches return 4 because emit() produces exactly one
  // 4-byte instruction (ld or nop) either way; the redundant if mirrors the
  // structure of emit(). Could be collapsed to a single 'return 4;'.
  if (LoadPollAddressFromThread) {
    return 4;
  } else {
    return 4;
  }
}

#ifndef PRODUCT
void MachLoadPollAddrLateNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print_cr(" LD      R11, PollAddressOffset, R16_thread \t// LoadPollAddressFromThread");
}
#endif

const RegMask &MachLoadPollAddrLateNode::out_RegMask() const {
  return RSCRATCH1_BITS64_REG_mask();
}
#endif // PPC port

// =============================================================================

// Figure out which register class each belongs in: rc_int, rc_float or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_stack };

static enum RC rc_class(OptoReg::Name reg) {
  // Return the register class for the given register. The given register
  // reg is a <register>_num value, which is an index into the MachRegisterNumbers
  // enumeration in adGlobals_ppc64.hpp.

  if (reg == OptoReg::Bad) return rc_bad;

  // We have 64 integer register halves, starting at index 0.
  if (reg < 64) return rc_int;

  // We have 64 floating-point register halves, starting at index 64.
  if (reg < 64+64) return rc_float;

  // Between float regs & stack are the flags regs.
  assert(OptoReg::is_stack(reg), "blow up if spilling flags");

  return rc_stack;
}

// Emit (or, in a non-product build, print) a single 4-byte load or store of
// 'reg' at 'offset' from R1_SP, used by MachSpillCopyNode for spill copies.
// Returns the instruction size in bytes (always 4).
static int ld_st_helper(CodeBuffer *cbuf, const char *op_str, uint opcode, int reg, int offset, int reg, int offset,
                        bool do_print, Compile* C, outputStream *st) {

  assert(opcode == Assembler::LD_OPCODE   ||
         opcode == Assembler::STD_OPCODE  ||
         opcode == Assembler::LWZ_OPCODE  ||
         opcode == Assembler::STW_OPCODE  ||
         opcode == Assembler::LFD_OPCODE  ||
         opcode == Assembler::STFD_OPCODE ||
         opcode == Assembler::LFS_OPCODE  ||
         opcode == Assembler::STFS_OPCODE,
         "opcode not supported");

  if (cbuf) {
    // LD/STD use the DS instruction form (offset must be 4-byte aligned),
    // all others use the D form.
    int d =
      (Assembler::LD_OPCODE == opcode || Assembler::STD_OPCODE == opcode) ?
        Assembler::ds(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/)
      : Assembler::d1(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/); // Makes no difference in opt build.
    emit_long(*cbuf, opcode | Assembler::rt(Matcher::_regEncode[reg]) | d | Assembler::ra(R1_SP));
  }
#ifndef PRODUCT
  else if (do_print) {
    st->print("%-7s %s, [R1_SP + #%d+%d] \t// spill copy",
              op_str,
              Matcher::regName[reg],
              offset, 0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/);
  }
#endif
  return 4; // size
}

// Shared worker for MachSpillCopyNode::format/emit/size: emits the copy into
// cbuf if non-NULL, prints to st otherwise (non-product), and returns the
// total size in bytes of the emitted instructions.
uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
  if (src_hi != OptoReg::Bad)
    assert((src_lo&1)==0 && src_lo+1==src_hi &&
           (dst_lo&1)==0 && dst_lo+1==dst_hi,
           "expected aligned-adjacent pairs");
  // Generate spill code!
  int size = 0;

  if (src_lo == dst_lo && src_hi == dst_hi)
    return size;   // Self copy, no move.

  // --------------------------------------
  // Memory->Memory Spill. Use R0 to hold the value.
  if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
    int src_offset = ra_->reg2offset(src_lo);
    int dst_offset = ra_->reg2offset(dst_lo);
    if (src_hi != OptoReg::Bad) {
      assert(src_hi_rc==rc_stack && dst_hi_rc==rc_stack,
             "expected same type of move for high parts");
      // 64-bit value: load/store the full doubleword through R0.
      size += ld_st_helper(cbuf, "LD ",  Assembler::LD_OPCODE,  R0_num, src_offset, !do_size, C, st);
      if (!cbuf && !do_size) st->print("\n\t");
      size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, R0_num, dst_offset, !do_size, C, st);
    } else {
      // 32-bit value: word load/store suffices.
      size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, R0_num, src_offset, !do_size, C, st);
      if (!cbuf && !do_size) st->print("\n\t");
      size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, R0_num, dst_offset, !do_size, C, st);
    }
    return size;
  }

  // --------------------------------------
  // Check for float->int copy; requires a trip through memory.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_int) {
    Unimplemented();
  }

  // --------------------------------------
  // Check for integer reg-reg copy.
  if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
    Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
    Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
    // A copy to the same physical register needs no instruction.
    size = (Rsrc != Rdst) ? 4 : 0;

    if (cbuf) {
      MacroAssembler _masm(cbuf);
      if (size) {
        __ mr(Rdst, Rsrc);
      }
    }
#ifndef PRODUCT
    else if (!do_size) {
      if (size) {
        st->print("%-7s %s, %s \t// spill copy", "MR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
      } else {
        st->print("%-7s %s, %s \t// spill copy", "MR-NOP", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
      }
    }
#endif
    return size;
  }

  // Check for integer store.
  if (src_lo_rc == rc_int && dst_lo_rc == rc_stack) {
    int dst_offset = ra_->reg2offset(dst_lo);
    if (src_hi != OptoReg::Bad) {
      assert(src_hi_rc==rc_int && dst_hi_rc==rc_stack,
             "expected same type of move for high parts");
      size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, src_lo, dst_offset, !do_size, C, st);
    } else {
      size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, src_lo, dst_offset, !do_size, C, st);
    }
    return size;
  }

  // Check for integer load.
  if (dst_lo_rc == rc_int && src_lo_rc == rc_stack) {
    int src_offset = ra_->reg2offset(src_lo);
    if (src_hi != OptoReg::Bad) {
      assert(dst_hi_rc==rc_int && src_hi_rc==rc_stack,
             "expected same type of move for high parts");
      size += ld_st_helper(cbuf, "LD  ", Assembler::LD_OPCODE,  dst_lo, src_offset, !do_size, C, st);
    } else {
      size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, dst_lo, src_offset, !do_size, C, st);
    }
    return size;
  }

  // Check for float reg-reg copy.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
    if (cbuf) {
      MacroAssembler _masm(cbuf);
      FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
      FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
      __ fmr(Rdst, Rsrc);
    }
#ifndef PRODUCT
    else if (!do_size) {
      st->print("%-7s %s, %s \t// spill copy", "FMR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
    }
#endif
    return 4;
  }

  // Check for float store.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
    int dst_offset = ra_->reg2offset(dst_lo);
    if (src_hi != OptoReg::Bad) {
      assert(src_hi_rc==rc_float && dst_hi_rc==rc_stack,
             "expected same type of move for high parts");
      size += ld_st_helper(cbuf, "STFD", Assembler::STFD_OPCODE, src_lo, dst_offset, !do_size, C, st);
    } else {
      size += ld_st_helper(cbuf, "STFS", Assembler::STFS_OPCODE, src_lo, dst_offset, !do_size, C, st);
    }
    return size;
  }

  // Check for float load.
  if (dst_lo_rc == rc_float && src_lo_rc == rc_stack) {
    int src_offset = ra_->reg2offset(src_lo);
    if (src_hi != OptoReg::Bad) {
      assert(dst_hi_rc==rc_float && src_hi_rc==rc_stack,
             "expected same type of move for high parts");
      size += ld_st_helper(cbuf, "LFD ", Assembler::LFD_OPCODE, dst_lo, src_offset, !do_size, C, st);
    } else {
      size += ld_st_helper(cbuf, "LFS ", Assembler::LFS_OPCODE, dst_lo, src_offset, !do_size, C, st);
    }
    return size;
  }

  // --------------------------------------------------------------------
  // Check for hi bits still needing moving. Only happens for misaligned
  // arguments to native calls.
  if (src_hi == dst_hi)
    return size;   // Self copy; no move.

  assert(src_hi_rc != rc_bad && dst_hi_rc != rc_bad, "src_hi & dst_hi cannot be Bad");
  ShouldNotReachHere(); // Unimplemented
  return 0;
}

#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  // Without a register allocator we can only print the node structure.
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(NULL, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // Dry run of implementation(): computes the size without emitting.
  return implementation(NULL, ra_, true, NULL);
}

#if 0 // TODO: PPC port
// Classify a spill copy into a single representative arch opcode.
// Mirrors the dispatch order of MachSpillCopyNode::implementation above.
ArchOpcode MachSpillCopyNode_archOpcode(MachSpillCopyNode *n, PhaseRegAlloc *ra_) {
#ifndef PRODUCT
  if (ra_->node_regs_max_index() == 0) return archOpcode_undefined;
#endif
  assert(ra_->node_regs_max_index() != 0, "");

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(n->in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(n->in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(n);
  OptoReg::Name dst_lo = ra_->get_reg_first(n);

  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_lo_rc = rc_class(dst_lo);

  if (src_lo == dst_lo && src_hi == dst_hi)
    return ppc64Opcode_none; // Self copy, no move.

  // --------------------------------------
  // Memory->Memory Spill. Use R0 to hold the value.
  if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
    return ppc64Opcode_compound;
  }

  // --------------------------------------
  // Check for float->int copy; requires a trip through memory.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_int) {
    Unimplemented();
  }

  // --------------------------------------
  // Check for integer reg-reg copy.
  if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
    Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
    Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
    if (Rsrc == Rdst) {
      return ppc64Opcode_none;
    } else {
      // 'mr' is encoded as 'or' on PPC.
      return ppc64Opcode_or;
    }
  }

  // Check for integer store.
  if (src_lo_rc == rc_int && dst_lo_rc == rc_stack) {
    if (src_hi != OptoReg::Bad) {
      return ppc64Opcode_std;
    } else {
      return ppc64Opcode_stw;
    }
  }

  // Check for integer load.
  if (dst_lo_rc == rc_int && src_lo_rc == rc_stack) {
    if (src_hi != OptoReg::Bad) {
      return ppc64Opcode_ld;
    } else {
      return ppc64Opcode_lwz;
    }
  }

  // Check for float reg-reg copy.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
    return ppc64Opcode_fmr;
  }

  // Check for float store.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
    if (src_hi != OptoReg::Bad) {
      return ppc64Opcode_stfd;
    } else {
      return ppc64Opcode_stfs;
    }
  }

  // Check for float load.
  if (dst_lo_rc == rc_float && src_lo_rc == rc_stack) {
    if (src_hi != OptoReg::Bad) {
      return ppc64Opcode_lfd;
    } else {
      return ppc64Opcode_lfs;
    }
  }

  // --------------------------------------------------------------------
  // Check for hi bits still needing moving. Only happens for misaligned
  // arguments to native calls.
  if (src_hi == dst_hi)
    return ppc64Opcode_none; // Self copy; no move.

  ShouldNotReachHere();
  return ppc64Opcode_undefined;
}
#endif // PPC port

#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("NOP \t// %d nops to pad for loops.", _count);
}
#endif

void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
  MacroAssembler _masm(&cbuf);
  // _count contains the number of nops needed for padding.
  for (int i = 0; i < _count; i++) {
    __ nop();
  }
}

uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  // Each nop is one 4-byte instruction.
  return _count * 4;
}

#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI    %s, SP, %d \t// box node", Matcher::regName[reg], offset);
}
#endif

// Materialize the address of the on-stack lock box: dst = SP + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);

  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm(offset, 16)) {
    // Fits in addi's 16-bit signed displacement.
    __ addi(as_Register(reg), R1, offset);
  } else {
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4; // single addi, see emit() above
}

#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print_cr("---- MachUEPNode ----");
  st->print_cr("...");
}
#endif

// Unverified entry point: check that the receiver's klass matches the
// inline-cache klass, branching to the ic-miss stub otherwise.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  // This is the unverified entry point.
  MacroAssembler _masm(&cbuf);

  // Inline_cache contains a klass.
  Register ic_klass       = as_Register(Matcher::inline_cache_reg_encode());
  Register receiver_klass = R0;  // tmp

  assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
  assert(R11_scratch1 == R11, "need prologue scratch register");

  // Check for NULL argument if we don't have implicit null checks.
  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
    if (TrapBasedNullChecks) {
      __ trap_null_check(R3_ARG1);
    } else {
      Label valid;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne_predict_taken(CCR0, valid);
      // We have a null argument, branch to ic_miss_stub.
      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                       relocInfo::runtime_call_type);
      __ bind(valid);
    }
  }
  // Assume argument is not NULL, load klass from receiver.
  __ load_klass(receiver_klass, R3_ARG1);

  if (TrapBasedICMissChecks) {
    __ trap_ic_miss_check(receiver_klass, ic_klass);
  } else {
    Label valid;
    __ cmpd(CCR0, receiver_klass, ic_klass);
    __ beq_predict_taken(CCR0, valid);
    // We have an unexpected klass, branch to ic_miss_stub.
    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
                     relocInfo::runtime_call_type);
    __ bind(valid);
  }

  // Argument is valid and klass is as expected, continue.
}

#if 0 // TODO: PPC port
// Optimize UEP code on z (save a load_const() call in main path).
// NOTE(review): "on z" looks like a leftover from the s390 port this was
// copied from — confirm before enabling.
int MachUEPNode::ep_offset() {
  return 0;
}
#endif

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

//=============================================================================

uint size_exception_handler() {
  // The exception_handler is a b64_patchable.
  return MacroAssembler::b64_patchable_size;
}

uint size_deopt_handler() {
  // The deopt_handler is a bl64_patchable.
  return MacroAssembler::bl64_patchable_size;
}

// Emit the exception-handler stub: a fixed-size patchable branch to the
// exception blob. Returns the stub's offset, or 0 if stub space ran out.
int emit_exception_handler(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed

  int offset = __ offset();
  __ b64_patchable((address)OptoRuntime::exception_blob()->content_begin(),
                   relocInfo::runtime_call_type);
  assert(__ offset() - offset == (int)size_exception_handler(), "must be fixed size");
  __ end_a_stub();

  return offset;
}

// The deopt_handler is like the exception handler, but it calls to
// the deoptimization blob instead of jumping to the exception blob.
int emit_deopt_handler(CodeBuffer& cbuf) {
  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed

  int offset = __ offset();
  // bl (not b): the deopt blob needs the return address to find the frame.
  __ bl64_patchable((address)SharedRuntime::deopt_blob()->unpack(),
                    relocInfo::runtime_call_type);
  assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");
  __ end_a_stub();

  return offset;
}

//=============================================================================

// Given a register encoding, produce an Integer Register object.
static Register reg_to_register_object(int register_encoding) {
  // Verify ppc encoding by example.
  assert(R12->encoding() == R12_enc, "wrong coding");
  return as_Register(register_encoding);
}

// Given a register encoding, produce a Float Register object.
static FloatRegister reg_to_FloatRegister_object(int register_encoding) {
  // Verify ppc encoding by example.
  assert(F5->encoding() == F5_enc, "wrong coding");
  return as_FloatRegister(register_encoding);
}

// Given a register encoding, produce a ConditionRegister object.
static ConditionRegister reg_to_ConditionRegister_object(int register_encoding) {
  // Verify ppc encoding by example.
  assert(CCR5->encoding() == CCR5_enc, "wrong coding");
  return as_ConditionRegister(register_encoding);
}

// Report whether an ideal opcode has a usable match rule on this CPU,
// gating some rules on runtime CPU-feature and flag checks.
const bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode))
    return false;

  switch (opcode) {
  case Op_SqrtD:
    return VM_Version::has_fsqrt();
  case Op_CountLeadingZerosI:
  case Op_CountLeadingZerosL:
  case Op_CountTrailingZerosI:
  case Op_CountTrailingZerosL:
    if (!UseCountLeadingZerosInstructionsPPC64)
      return false;
    break;

  case Op_PopCountI:
  case Op_PopCountL:
    return (UsePopCountInstruction && VM_Version::has_popcntw());

  case Op_StrComp:
    return SpecialStringCompareTo;
  case Op_StrEquals:
    return SpecialStringEquals;
  case Op_StrIndexOf:
    return SpecialStringIndexOf;
  }

  return true; // Per default match rules are supported.
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  // No user for this method?
  Unimplemented();
  return 999;
}

const bool Matcher::convL2FSupported(void) {
  // fcfids can do the conversion (>= Power7).
  // fcfid + frsp showed rounding problem when result should be 0x3f800001.
  return VM_Version::has_fcfids(); // False means that conversion is done by runtime call.
}

// Map Types to machine register types
const int Matcher::base2reg[Type::lastype] = {
  Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0, Op_RegN,
  Node::NotAMachineReg, Node::NotAMachineReg, /* tuple, array */
  0, Op_RegL, 0, 0, /* Vectors */
  Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, /* the pointers */
  0, 0/*abio*/,
  Op_RegP /* Return address */, 0, /* the memories */
  Op_RegF, Op_RegF, Op_RegF, Op_RegD, Op_RegD, Op_RegD,
  0 /*bottom*/
};

// Vector width in bytes.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg.
const int Matcher::vector_ideal_reg(int size) {
  // 8-byte vectors live in long registers on this port.
  assert(MaxVectorSize == 8 && size == 8, "");
  return Op_RegL;
}

const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// PPC doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}

// RETURNS: whether this branch offset is short enough that a short
// branch can be used.
//
// If the platform does not provide any short branch variants, then
// this method should return `false' for offset 0.
//
// `Compile::Fill_buffer' will decide on basis of this information
// whether to do the pass `Compile::Shorten_branches' at all.
//
// And `Compile::Shorten_branches' will decide on basis of this
// information whether to replace particular branch sites by short
// ones.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // Is the offset within the range of a ppc64 pc relative branch?
  bool b;

  // Pad by a few instructions in case later passes shift the branch.
  const int safety_zone = 3 * BytesPerInstWord;
  // Conditional branch displacement: instruction bits 16..29 give a 14-bit
  // field, plus 1 sign bit adjustment and 2 implicit low zero bits from
  // 4-byte instruction alignment => a 16-bit signed byte offset.
  b = Assembler::is_simm((offset<0 ? offset-safety_zone : offset+safety_zone),
                         29 - 16 + 1 + 2);
  return b;
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Probably always true, even if a temp register is required.
  return true;
}

/* TODO: PPC port
// Make a new machine dependent decode node (with its operands).
MachTypeNode *Matcher::make_decode_node(Compile *C) {
  assert(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0,
         "This method is only implemented for unscaled cOops mode so far");
  MachTypeNode *decode = new (C) decodeN_unscaledNode();
  decode->set_opnd_array(0, new (C) iRegPdstOper());
  decode->set_opnd_array(1, new (C) iRegNsrcOper());
  return decode;
}
*/

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Use conditional move (CMOVL) on Power7.
const int Matcher::long_cmove_cost() { return 0; } // this only makes long cmoves more expensive than int cmoves

// Suppress CMOVF. Conditional move available (sort of) on PPC64 only from P7 onwards. Not exploited yet.
// fsel doesn't accept a condition register as input, so this would be slightly different.
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Power6 requires late expand (see block.cpp for description of late expand).
const bool Matcher::require_late_expand = true;

// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers? True for Intel but false for most RISCs.
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// PowerPC requires masked shift counts.
const bool Matcher::need_masked_shift_count = true;

// This affects two different things:
//  - how Decode nodes are matched
//  - how ImplicitNullCheck opportunities are recognized
// If true, the matcher will try to remove all Decodes and match them
// (as operands) into nodes. NullChecks are not prepared to deal with
// Decodes by final_graph_reshaping().
// If false, final_graph_reshaping() forces the decode behind the Cmp
// for a NullCheck. The matcher matches the Decode node into a register.
// Implicit_null_check optimization moves the Decode along with the
// memory operation back up before the NullCheck.
bool Matcher::narrow_oop_use_complex_address() {
  // TODO: PPC port if (MatchDecodeNodes) return true;
  return false;
}

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
  Unimplemented();
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Do floats take an entire double register or just half?
//
// A float occupies a ppc64 double register. For the allocator, a
// ppc64 double register appears as a pair of float registers.
bool Matcher::float_in_double() { return true; }

// Do ints take an entire long register or just half?
// The relevant question is how the int is callee-saved:
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits.
const bool Matcher::int_in_long = true;

// Constants for c2c and c calling conventions.

// Integer argument registers R3-R10 (PPC64 ELF ABI).
const MachRegisterNumbers iarg_reg[8] = {
  R3_num, R4_num, R5_num, R6_num,
  R7_num, R8_num, R9_num, R10_num
};

// Float argument registers F1-F13 (PPC64 ELF ABI).
const MachRegisterNumbers farg_reg[13] = {
  F1_num, F2_num, F3_num, F4_num,
  F5_num, F6_num, F7_num, F8_num,
  F9_num, F10_num, F11_num, F12_num,
  F13_num
};

const int num_iarg_registers = sizeof(iarg_reg) / sizeof(iarg_reg[0]);

const int num_farg_registers = sizeof(farg_reg) / sizeof(farg_reg[0]);

// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg(int reg) {
  // We return true for all registers contained in iarg_reg[] and
  // farg_reg[] and their virtual halves.
  // We must include the virtual halves in order to get STDs and LDs
  // instead of STWs and LWs in the trampoline stubs.

  // Integer argument registers R3-R10 (cf. iarg_reg above).
  if (   reg == R3_num  || reg == R3_H_num
      || reg == R4_num  || reg == R4_H_num
      || reg == R5_num  || reg == R5_H_num
      || reg == R6_num  || reg == R6_H_num
      || reg == R7_num  || reg == R7_H_num
      || reg == R8_num  || reg == R8_H_num
      || reg == R9_num  || reg == R9_H_num
      || reg == R10_num || reg == R10_H_num)
    return true;

  // Float argument registers F1-F13 (cf. farg_reg above).
  if (   reg == F1_num  || reg == F1_H_num
      || reg == F2_num  || reg == F2_H_num
      || reg == F3_num  || reg == F3_H_num
      || reg == F4_num  || reg == F4_H_num
      || reg == F5_num  || reg == F5_H_num
      || reg == F6_num  || reg == F6_H_num
      || reg == F7_num  || reg == F7_H_num
      || reg == F8_num  || reg == F8_H_num
      || reg == F9_num  || reg == F9_H_num
      || reg == F10_num || reg == F10_H_num
      || reg == F11_num || reg == F11_H_num
      || reg == F12_num || reg == F12_H_num
      || reg == F13_num || reg == F13_H_num)
    return true;

  return false;
}

bool Matcher::is_spillable_arg(int reg) {
  return can_be_java_arg(reg);
}

bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return RegMask();
}

%}

//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
// byte streams. Encoding classes are parameterized macros used by
// Machine Instruction Nodes in order to generate the bit encoding of the
// instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
// operand to generate a function which returns its register number when
// queried. CONST_INTER causes an operand to generate a function which
// returns the value of the constant when queried. MEMORY_INTER causes an
// operand to generate four functions which return the Base Register, the
// Index Register, the Scale Value, and the Offset Value of the operand when
// queried. COND_INTER causes an operand to generate six functions which
// return the encoding code (ie - encoding bits for the instruction)
// associated with each basic boolean condition for a conditional instruction.
//
// Instructions specify two basic values for encoding. Again, a function
// is available to check if the constant displacement is an oop. They use the
// ins_encode keyword to specify their encoding classes (which must be
// a sequence of enc_class names, and their parameters, specified in
// the encoding block), and they use the
// opcode keyword to specify, in order, their primary, secondary, and
// tertiary opcode. Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
encode %{
  enc_class enc_unimplemented %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    __ unimplemented("Unimplemented mach node encoding in AD file.", 13);
  %}

  enc_class enc_untested %{
#ifdef ASSERT
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    __ untested("Untested mach node encoding in AD file.");
#else
    // TODO: PPC port $archOpcode(ppc64Opcode_none);
#endif
  %}

  // Use release_store for card-marking to ensure that previous
  // oop-stores are visible before the card-mark change.
  enc_class enc_cms_card_mark(memory mem, iRegLdst releaseFieldAddr) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    // FIXME: Implement this as a cmove and use a fixed condition code
    // register which is written on every transition to compiled code,
    // e.g. in call-stub and when returning from runtime stubs.
    //
    // Proposed code sequence for the cmove implementation:
    //
    // Label skip_release;
    // __ beq(CCRfixed, skip_release);
    // __ release();
    // __ bind(skip_release);
    // __ stb(card mark);

    MacroAssembler _masm(&cbuf);
    Label skip_release;

#if 0 // TODO: PPC port
    // Check CMSCollectorCardTableModRefBSExt::_requires_release and do the
    // release conditionally.
    __ lwz(R0, 0, reg_to_register_object($releaseFieldAddr$$reg));
    __ cmpwi(CCR0, R0, 0);
    __ beq_predict_taken(CCR0, skip_release);
#endif
    __ li(R0, 0);
    __ release();
#if 0 // TODO: PPC port
    __ bind(skip_release);
#endif

    // Do the store.
    if ($mem$$index == 0) {
      // Displacement form: address is base + disp.
      __ stb(R0, $mem$$disp, reg_to_register_object($mem$$base));
    } else {
      // Indexed form: address is base + index; displacement must be zero.
      assert(0 == $mem$$disp, "no displacement possible with indexed load/stores on ppc");
      __ stbx(R0, reg_to_register_object($mem$$base), reg_to_register_object($mem$$index));
    }
  %}

  enc_class enc_andi(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_andi_);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int Isrc2 = $src2$$constant;
    // FIXME: avoid andi_ ? (andi_ also sets CR0 as a side effect)
    __ andi_(Rdst, Rsrc1, Isrc2);
  %}

  enc_class enc_xori(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_xori);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int Isrc2 = $src2$$constant;
    __ xori(Rdst, Rsrc1, Isrc2);
  %}

  enc_class enc_ori(iRegIdst dst, iRegIsrc src1, uimmI src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_ori);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    // ori takes a 16-bit immediate; mask to the low halfword.
    int Isrc2 = ($src2$$constant) & 0xFFFF;
    __ ori(Rdst, Rsrc1, Isrc2);
  %}

  enc_class enc_addi(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_addi);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int Isrc2 = $src2$$constant;
    __ addi(Rdst, Rsrc1, Isrc2);
  %}

  enc_class enc_addis(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_addis);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    // NOTE(review): the operand is declared immI16 but the constant is
    // shifted right by 16 — presumably the instructs using this enc_class
    // pass the high halfword in the upper bits; confirm against callers.
    int Isrc2 = ($src2$$constant)>>16;
    __ addis(Rdst, Rsrc1, Isrc2);
  %}

  enc_class enc_addi_neg(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_addi);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    // Add the negated constant (used to implement subtraction via addi).
    int Isrc2 = ($src2$$constant) * (-1) ;
    __ addi(Rdst, Rsrc1, Isrc2);
  %}

  enc_class enc_subfic(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_subfic);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int Isrc2 = $src2$$constant;
    __ subfic(Rdst, Rsrc1, Isrc2);
  %}

  enc_class enc_mulli(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_mulli);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int Isrc2 = $src2$$constant;
    __ mulli(Rdst, Rsrc1, Isrc2);
  %}

  enc_class enc_neg(iRegIdst dst, iRegIsrc src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_neg);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    __ neg(Rdst, Rsrc1);
  %}

  enc_class enc_fcfid(RegD dst, RegD tmp) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_fcfid);

    MacroAssembler _masm(&cbuf);
    FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
    FloatRegister Rtmp = reg_to_FloatRegister_object($tmp$$reg);
    __ fcfid(Rdst, Rtmp);
  %}

  enc_class enc_fcfids(RegF dst, RegD tmp) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_fcfid);

    MacroAssembler _masm(&cbuf);
    FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
    FloatRegister Rtmp = reg_to_FloatRegister_object($tmp$$reg);
    __ fcfids(Rdst, Rtmp);
  %}

  enc_class enc_extsw(iRegLdst dst, iRegIsrc src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_extsw);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    __ extsw(Rdst, Rsrc1);
  %}

  enc_class enc_extswII(iRegIdst dst, iRegIsrc src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_extsw);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    __ extsw(Rdst, Rsrc1);
  %}

  enc_class enc_extswLL(iRegLdst dst, iRegLsrc src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_extsw);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    __ extsw(Rdst, Rsrc1);
  %}

  enc_class enc_extsh(iRegLdst dst, iRegIsrc src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_extsh);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    __ extsh(Rdst, Rsrc1);
  %}

  enc_class enc_extsb(iRegIdst dst, iRegIsrc src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_extsb);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    __ extsb(Rdst, Rsrc1);
  %}

  enc_class enc_not(iRegIdst dst, iRegIsrc src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_nor);

    MacroAssembler _masm(&cbuf);
    Register Rdst  = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    // NOT is encoded as 'nor Rdst, Rsrc1, Rsrc1' on PPC.
    __ nor(Rdst, Rsrc1, Rsrc1);
2893 %} 2894 2895 enc_class enc_andc(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 2896 // TODO: PPC port $archOpcode(ppc64Opcode_andc); 2897 2898 MacroAssembler _masm(&cbuf); 2899 Register Rdst = reg_to_register_object($dst$$reg); 2900 Register Rsrc1 = reg_to_register_object($src1$$reg); 2901 Register Rsrc2 = reg_to_register_object($src2$$reg); 2902 __ andc(Rdst, Rsrc1, Rsrc2); 2903 %} 2904 2905 enc_class enc_and(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 2906 // TODO: PPC port $archOpcode(ppc64Opcode_and); 2907 2908 MacroAssembler _masm(&cbuf); 2909 Register Rdst = reg_to_register_object($dst$$reg); 2910 Register Rsrc1 = reg_to_register_object($src1$$reg); 2911 Register Rsrc2 = reg_to_register_object($src2$$reg); 2912 __ andr(Rdst, Rsrc1, Rsrc2); 2913 %} 2914 2915 enc_class enc_or(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 2916 // TODO: PPC port $archOpcode(ppc64Opcode_or); 2917 2918 MacroAssembler _masm(&cbuf); 2919 Register Rdst = reg_to_register_object($dst$$reg); 2920 Register Rsrc1 = reg_to_register_object($src1$$reg); 2921 Register Rsrc2 = reg_to_register_object($src2$$reg); 2922 __ or_unchecked(Rdst, Rsrc1, Rsrc2); 2923 %} 2924 2925 enc_class enc_xor(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 2926 // TODO: PPC port $archOpcode(ppc64Opcode_xor); 2927 2928 MacroAssembler _masm(&cbuf); 2929 Register Rdst = reg_to_register_object($dst$$reg); 2930 Register Rsrc1 = reg_to_register_object($src1$$reg); 2931 Register Rsrc2 = reg_to_register_object($src2$$reg); 2932 __ xorr(Rdst, Rsrc1, Rsrc2); 2933 %} 2934 2935 enc_class enc_fadds(RegF dst, RegF src1, RegF src2) %{ 2936 // TODO: PPC port $archOpcode(ppc64Opcode_fadds); 2937 2938 MacroAssembler _masm(&cbuf); 2939 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 2940 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 2941 FloatRegister Rsrc2 = reg_to_FloatRegister_object($src2$$reg); 2942 __ fadds(Rdst, Rsrc1, Rsrc2); 2943 %} 2944 2945 enc_class enc_fadd(RegF dst, RegF src1, RegF 
src2) %{ 2946 // TODO: PPC port $archOpcode(ppc64Opcode_fadd); 2947 2948 MacroAssembler _masm(&cbuf); 2949 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 2950 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 2951 FloatRegister Rsrc2 = reg_to_FloatRegister_object($src2$$reg); 2952 __ fadd(Rdst, Rsrc1, Rsrc2); 2953 %} 2954 2955 enc_class enc_fsubs(RegF dst, RegF src1, RegF src2) %{ 2956 // TODO: PPC port $archOpcode(ppc64Opcode_fsubs); 2957 2958 MacroAssembler _masm(&cbuf); 2959 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 2960 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 2961 FloatRegister Rsrc2 = reg_to_FloatRegister_object($src2$$reg); 2962 __ fsubs(Rdst, Rsrc1, Rsrc2); 2963 %} 2964 2965 enc_class enc_fsub(RegF dst, RegF src1, RegF src2) %{ 2966 // TODO: PPC port $archOpcode(ppc64Opcode_fsub); 2967 2968 MacroAssembler _masm(&cbuf); 2969 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 2970 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 2971 FloatRegister Rsrc2 = reg_to_FloatRegister_object($src2$$reg); 2972 __ fsub(Rdst, Rsrc1, Rsrc2); 2973 %} 2974 2975 enc_class enc_fdivs(RegF dst, RegF src1, RegR src2) %{ 2976 // TODO: PPC port $archOpcode(ppc64Opcode_fdivs); 2977 2978 MacroAssembler _masm(&cbuf); 2979 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 2980 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 2981 FloatRegister Rsrc2 = reg_to_FloatRegister_object($src2$$reg); 2982 __ fdivs(Rdst, Rsrc1, Rsrc2); 2983 %} 2984 2985 enc_class enc_add(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 2986 // TODO: PPC port $archOpcode(ppc64Opcode_add); 2987 2988 MacroAssembler _masm(&cbuf); 2989 Register Rdst = reg_to_register_object($dst$$reg); 2990 Register Rsrc1 = reg_to_register_object($src1$$reg); 2991 Register Rsrc2 = reg_to_register_object($src2$$reg); 2992 __ add(Rdst, Rsrc1, Rsrc2); 2993 %} 2994 2995 enc_class enc_subf(iRegIdst dst, iRegIsrc src1, 
iRegIsrc src2) %{ 2996 // TODO: PPC port $archOpcode(ppc64Opcode_subf); 2997 2998 MacroAssembler _masm(&cbuf); 2999 Register Rdst = reg_to_register_object($dst$$reg); 3000 Register Rsrc1 = reg_to_register_object($src1$$reg); 3001 Register Rsrc2 = reg_to_register_object($src2$$reg); 3002 __ subf(Rdst, Rsrc1, Rsrc2); 3003 %} 3004 3005 enc_class enc_mullw(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 3006 // TODO: PPC port $archOpcode(ppc64Opcode_mullw); 3007 3008 MacroAssembler _masm(&cbuf); 3009 Register Rdst = reg_to_register_object($dst$$reg); 3010 Register Rsrc1 = reg_to_register_object($src1$$reg); 3011 Register Rsrc2 = reg_to_register_object($src2$$reg); 3012 __ mullw(Rdst, Rsrc1, Rsrc2); 3013 %} 3014 3015 enc_class enc_mulld(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 3016 // TODO: PPC port $archOpcode(ppc64Opcode_mulld); 3017 3018 MacroAssembler _masm(&cbuf); 3019 Register Rdst = reg_to_register_object($dst$$reg); 3020 Register Rsrc1 = reg_to_register_object($src1$$reg); 3021 Register Rsrc2 = reg_to_register_object($src2$$reg); 3022 __ mulld(Rdst, Rsrc1, Rsrc2); 3023 %} 3024 3025 enc_class enc_mulhd(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{ 3026 // TODO: PPC port $archOpcode(ppc64Opcode_mulhd); 3027 3028 MacroAssembler _masm(&cbuf); 3029 Register Rdst = reg_to_register_object($dst$$reg); 3030 Register Rsrc1 = reg_to_register_object($src1$$reg); 3031 Register Rsrc2 = reg_to_register_object($src2$$reg); 3032 __ mulhd(Rdst, Rsrc1, Rsrc2); 3033 %} 3034 3035 enc_class enc_fdiv(RegF dst, RegF src1, RegR src2) %{ 3036 // TODO: PPC port $archOpcode(ppc64Opcode_fdiv); 3037 3038 MacroAssembler _masm(&cbuf); 3039 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 3040 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 3041 FloatRegister Rsrc2 = reg_to_FloatRegister_object($src2$$reg); 3042 __ fdiv(Rdst, Rsrc1, Rsrc2); 3043 %} 3044 3045 enc_class enc_fmuls(regF dst, regF src1, regF src2) %{ 3046 // TODO: PPC port 
$archOpcode(ppc64Opcode_fmuls); 3047 3048 MacroAssembler _masm(&cbuf); 3049 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 3050 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 3051 FloatRegister Rsrc2 = reg_to_FloatRegister_object($src2$$reg); 3052 __ fmuls(Rdst, Rsrc1, Rsrc2); 3053 %} 3054 3055 enc_class enc_fmul(regF dst, regF src1, regF src2) %{ 3056 // TODO: PPC port $archOpcode(ppc64Opcode_fmul); 3057 3058 MacroAssembler _masm(&cbuf); 3059 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 3060 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 3061 FloatRegister Rsrc2 = reg_to_FloatRegister_object($src2$$reg); 3062 __ fmul(Rdst, Rsrc1, Rsrc2); 3063 %} 3064 3065 enc_class enc_fabs(regF dst, regF src1) %{ 3066 // TODO: PPC port $archOpcode(ppc64Opcode_fabs); 3067 3068 MacroAssembler _masm(&cbuf); 3069 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 3070 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 3071 __ fabs(Rdst, Rsrc1); 3072 %} 3073 3074 enc_class enc_fnabs(regF dst, regF src1) %{ 3075 // TODO: PPC port $archOpcode(ppc64Opcode_fnabs); 3076 3077 MacroAssembler _masm(&cbuf); 3078 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 3079 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 3080 __ fnabs(Rdst, Rsrc1); 3081 %} 3082 3083 enc_class enc_fsqrt(regF dst, regF src1) %{ 3084 // TODO: PPC port $archOpcode(ppc64Opcode_fsqrt); 3085 3086 MacroAssembler _masm(&cbuf); 3087 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 3088 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 3089 __ fsqrt(Rdst, Rsrc1); 3090 %} 3091 3092 enc_class enc_frsp(regF dst, regD src1) %{ 3093 // TODO: PPC port $archOpcode(ppc64Opcode_frsp); 3094 3095 MacroAssembler _masm(&cbuf); 3096 FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg); 3097 FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg); 3098 __ frsp(Rdst, Rsrc1); 3099 %} 3100 3101 
  // Single-precision square root.
  enc_class enc_fsqrts(regF dst, regF src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_fsqrts);

    MacroAssembler _masm(&cbuf);
    FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
    FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg);
    __ fsqrts(Rdst, Rsrc1);
  %}

  // Floating-point negate.
  enc_class enc_fneg(regF dst, regF src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_fneg);

    MacroAssembler _masm(&cbuf);
    FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
    FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg);
    __ fneg(Rdst, Rsrc1);
  %}

  // Load zero-extended word from [base + disp].
  enc_class enc_lwz(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_lwz);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    // Base encoding 1 is the stack pointer: bias the displacement by the
    // frame's SP bias (currently 0, see TODO).
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lwz(Rdst, Idisp, Rbase);
  %}

  // Load acquire.
  // Load word with acquire semantics: load, then a never-taken trap on the
  // loaded value to order the dependent isync.
  enc_class enc_lwz_ac(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    // Base encoding 1 is the stack pointer: bias by the frame's SP bias.
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lwz(Rdst, Idisp, Rbase);
    __ twi_0(Rdst); // SAPJVM MD 2011-10-06 replaced cmp-br-isync by twi-isync
    __ isync();
  %}

  // SAPJVM GL 2013-10-29 Match ConvI2L(LoadI)
  // Load word algebraic (sign-extending).
  enc_class enc_lwa(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_lwa);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes() */;
    }
    __ lwa(Rdst, Idisp, Rbase);
  %}

  // Load word algebraic with acquire semantics (lwa + twi + isync).
  // NOTE(review): this emits three instructions but the archOpcode TODO says
  // ppc64Opcode_lwa; every sibling *_ac encoding says _compound -- confirm.
  enc_class enc_lwa_ac(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_lwa);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lwa(Rdst, Idisp, Rbase);
    __ twi_0(Rdst);
    __ isync();
  %}

  // Load zero-extended halfword.
  enc_class enc_lhz(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_lhz);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lhz(Rdst, Idisp, Rbase);
  %}

  // Load acquire.
  // Load halfword (zero-extended) with acquire semantics.
  enc_class enc_lhz_ac(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    // Base encoding 1 is the stack pointer: bias by the frame's SP bias.
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lhz(Rdst, Idisp, Rbase);
    __ twi_0(Rdst);
    __ isync();
  %}

  // Load halfword algebraic (sign-extending).
  enc_class enc_lha(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_lha);
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lha(Rdst, Idisp, Rbase);
  %}

  // Load acquire.
  enc_class enc_lha_ac(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lha(Rdst, Idisp, Rbase);
    __ twi_0(Rdst);
    __ isync();
  %}

  // Load zero-extended byte.
  enc_class enc_lbz(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_lbz);
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lbz(Rdst, Idisp, Rbase);
  %}

  // Load acquire.
  // Load byte (zero-extended) with acquire semantics.
  enc_class enc_lbz_ac(iRegIdst dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    // Base encoding 1 is the stack pointer: bias by the frame's SP bias.
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lbz(Rdst, Idisp, Rbase);
    __ twi_0(Rdst);
    __ isync();
  %}

  // Load doubleword; ld's DS-form displacement must be 4-byte aligned.
  enc_class enc_ld(iRegLdst dst, memoryAlg4 mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_ld);
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    // Operand 'ds' requires 4-alignment.
    assert((Idisp & 0x3) == 0, "unaligned offset");
    __ ld(Rdst, Idisp, Rbase);
  %}

  // Load acquire.
  enc_class enc_ld_ac(iRegLdst dst, memoryAlg4 mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    // Operand 'ds' requires 4-alignment.
    assert((Idisp & 0x3) == 0, "unaligned offset");
    __ ld(Rdst, Idisp, Rbase);
    __ twi_0(Rdst);
    __ isync();
  %}

  // Load single-precision float.
  enc_class enc_lfs(RegF dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_lfs);
    MacroAssembler _masm(&cbuf);
    FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lfs(Rdst, Idisp, Rbase);
  %}

  // Load acquire (float): twi cannot read an FP register, so a dependent
  // fcmpu + never-taken branch orders the following isync instead.
  enc_class enc_lfs_ac(RegF dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    Label next;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lfs(Rdst, Idisp, Rbase);
    __ fcmpu(CCR0, Rdst, Rdst);
    __ bne(CCR0, next);
    __ bind(next);
    __ isync();
  %}

  // Load double-precision float.
  enc_class enc_lfd(RegF dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_lfd);
    MacroAssembler _masm(&cbuf);
    FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lfd(Rdst, Idisp, Rbase);
  %}

  // Load acquire.
  // Load double with acquire semantics (same fcmpu/bne ordering trick as
  // enc_lfs_ac, since twi cannot read an FP register).
  enc_class enc_lfd_ac(RegF dst, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    Label next;
    // Base encoding 1 is the stack pointer: bias by the frame's SP bias.
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ lfd(Rdst, Idisp, Rbase);
    __ fcmpu(CCR0, Rdst, Rdst);
    __ bne(CCR0, next);
    __ bind(next);
    __ isync();
  %}

  // Store single-precision float.
  enc_class enc_stfs(RegF src, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_stfs);
    MacroAssembler _masm(&cbuf);
    FloatRegister Rsrc = reg_to_FloatRegister_object($src$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ stfs(Rsrc, Idisp, Rbase);
  %}

  // Store double-precision float.
  enc_class enc_stfd(RegF src, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_stfd);
    MacroAssembler _masm(&cbuf);
    FloatRegister Rsrc = reg_to_FloatRegister_object($src$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ stfd(Rsrc, Idisp, Rbase);
  %}

  // Store word.
  enc_class enc_stw(iRegIsrc src, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_stw);
    MacroAssembler _masm(&cbuf);
    Register Rsrc = reg_to_register_object($src$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ stw(Rsrc, Idisp, Rbase);
  %}

  // Store byte.
  enc_class enc_stb(iRegIsrc src, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_stb);
    MacroAssembler _masm(&cbuf);
    Register Rsrc = reg_to_register_object($src$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ stb(Rsrc, Idisp, Rbase);
  %}

  // Store for G1 card marking.
  // Stores the constant 0 (via R0) to the card table entry.
  enc_class enc_stb0(memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ li(R0, 0);
    __ stb(R0, Idisp, Rbase);
  %}

  // Store halfword.
  enc_class enc_sth(iRegIsrc src, memory mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_sth);
    MacroAssembler _masm(&cbuf);
    Register Rsrc = reg_to_register_object($src$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    __ sth(Rsrc, Idisp, Rbase);
  %}

  // Store doubleword; std's DS-form displacement must be 4-byte aligned.
  enc_class enc_std(iRegIsrc src, memoryAlg4 mem) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_std);
    MacroAssembler _masm(&cbuf);
    Register Rsrc = reg_to_register_object($src$$reg);
    Register Rbase = reg_to_register_object($mem$$base);
    int Idisp = $mem$$disp;
    if ($mem$$base == 1) {
      Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
    }
    // Operand 'ds' requires 4-alignment.
    assert((Idisp & 0x3) == 0, "unaligned offset");
    __ std(Rsrc, Idisp, Rbase);
  %}

  // Signed 32-bit register-register compare into crx.
  enc_class enc_cmpw(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmp);
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
    __ cmpw(Rcrx, Rsrc1, Rsrc2);
  %}

  // Unsigned 64-bit register-register compare.
  enc_class enc_cmpld(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmpl);
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
    __ cmpld(Rcrx, Rsrc1, Rsrc2);
  %}

  // Unsigned 32-bit register-register compare.
  enc_class enc_cmplw(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmpl);
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
    __ cmplw(Rcrx, Rsrc1, Rsrc2);
  %}

  // Signed 64-bit register-register compare.
  enc_class enc_cmpd(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmp);
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
    __ cmpd(Rcrx, Rsrc1, Rsrc2);
  %}

  // Signed 64-bit compare against a 16-bit immediate.
  enc_class enc_cmpdi(flagsReg crx, iRegIsrc src1, immL16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmpi);
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int Isrc2 = $src2$$constant;
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
    __ cmpdi(Rcrx, Rsrc1, Isrc2);
  %}

  // Signed 32-bit compare against a 16-bit immediate.
  enc_class enc_cmpwi(flagsReg crx, iRegIsrc src1, immL16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmpi);
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int Isrc2 = $src2$$constant;
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
    __ cmpwi(Rcrx, Rsrc1, Isrc2);
  %}

  // Unsigned 32-bit compare against a 16-bit immediate.
  enc_class enc_cmplwi(flagsReg crx, iRegIsrc src1, immL16 src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmpli);
    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int Isrc2 = $src2$$constant;
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
    __ cmplwi(Rcrx, Rsrc1, Isrc2);
  %}

  // Bit test: and_ into R0 only for its CR0 side effect; result discarded.
  enc_class enc_btst_reg(iRegIsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_and_);

    MacroAssembler _masm(&cbuf);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    __ and_(R0, Rsrc1, Rsrc2);
  %}

  // Shift left word by immediate (masked to 0..31, Java semantics).
  enc_class enc_slwi(iRegIdst dst, iRegIsrc src1, immI src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int isrc = ($src2$$constant) & 0x1f;
    __ slwi(Rdst, Rsrc1, isrc);
  %}

  // Shift left word by register.
  enc_class enc_slw(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_slw);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    __ slw(Rdst, Rsrc1, Rsrc2);
  %}

  // Shift left doubleword by immediate (masked to 0..63).
  enc_class enc_sldi(iRegIdst dst, iRegIsrc src1, immI src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int isrc = ($src2$$constant) & 0x3f;
    __ sldi(Rdst, Rsrc1, isrc);
  %}

  // Shift left doubleword by register.
  enc_class enc_sld(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_sld);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    __ sld(Rdst, Rsrc1, Rsrc2);
  %}

  // Clear the leftmost 'clearleftbits' bits, then shift left.
  enc_class enc_clrlsldi(iRegLdst dst, iRegIsrc src, uimmI6 clearleftbits, uimmI6 shiftleftbits) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rldic);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc = reg_to_register_object($src$$reg);
    int iclearleftbits = $clearleftbits$$constant;
    int ishiftleftbits = $shiftleftbits$$constant;
    __ clrlsldi(Rdst, Rsrc, iclearleftbits, ishiftleftbits);
  %}

  // 32G aligned narrow oop base.
  // Extract bits 32+3 .. 3 and place them right-justified in dst.
  // Encode a narrow oop: rotate the compressed bits into the low word
  // (rotate left by 64 - narrow_oop_shift, keep low 32 bits).
  enc_class enc_extrdi_encode(iRegLdst dst, iRegIsrc src) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc = reg_to_register_object($src$$reg);
    __ rldicl(Rdst, Rsrc, 64-Universe::narrow_oop_shift(), 32);
  %}

  // Clear the leftmost 'clearleftbits' bits of src.
  enc_class enc_clrldi(iRegLdst dst, iRegIsrc src, uimmI6 clearleftbits) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc = reg_to_register_object($src$$reg);
    int iclearleftbits = $clearleftbits$$constant;
    __ clrldi(Rdst, Rsrc, iclearleftbits);
  %}

  // Logical shift right word by immediate (masked to 0..31).
  enc_class enc_srwi(iRegIdst dst, iRegIsrc src1, immI src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int isrc = ($src2$$constant) & 0x1f;
    __ srwi(Rdst, Rsrc1, isrc);
  %}

  // Logical shift right word by register.
  enc_class enc_srw(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_srw);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    __ srw(Rdst, Rsrc1, Rsrc2);
  %}

  // Arithmetic shift right word by immediate (masked to 0..31).
  enc_class enc_srawi(iRegIdst dst, iRegIsrc src1, immI src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_srawi);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int isrc = ($src2$$constant) & 0x1f;
    __ srawi(Rdst, Rsrc1, isrc);
  %}

  // Arithmetic shift right word by register.
  enc_class enc_sraw(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_sraw);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    __ sraw(Rdst, Rsrc1, Rsrc2);
  %}

  // Arithmetic shift right doubleword by immediate (masked to 0..63).
  enc_class enc_sradi(iRegIdst dst, iRegIsrc src1, immI src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_sradi);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int isrc = ($src2$$constant) & 0x3f;
    __ sradi(Rdst, Rsrc1, isrc);
  %}

  // Arithmetic shift right doubleword by register.
  enc_class enc_srad(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_srad);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    __ srad(Rdst, Rsrc1, Rsrc2);
  %}

  // Logical shift right doubleword by immediate (masked to 0..63).
  enc_class enc_srdi(iRegLdst dst, iRegLsrc src1, immI src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    int isrc = ($src2$$constant) & 0x3f;
    __ srdi(Rdst, Rsrc1, isrc);
  %}

  // Logical shift right doubleword by register.
  enc_class enc_srd(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_srd);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    __ srd(Rdst, Rsrc1, Rsrc2);
  %}

  // Rotate word left.
enc_class enc_rotlwi(iRegIdst dst, iRegIsrc src, immI8 shift) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  int ishift = $shift$$constant;
  __ rotlwi(Rdst, Rsrc, ishift);
%}

// Rotate word right.
enc_class enc_rotrwi(iRegIdst dst, iRegIsrc src, immI8 shift) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  int ishift = $shift$$constant;
  __ rotrwi(Rdst, Rsrc, ishift);
%}

// AND with a negated power of two: implemented as clearing the rightmost
// log2(-src2) bits (clrrdi).
enc_class enc_andI_reg_immInegpow2(iRegIdst dst, iRegIsrc src1, immInegpow2 src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  int src2 = $src2$$constant;
  __ clrrdi(Rdst, Rsrc1, log2_long((jlong) (julong) (juint) -src2));
%}

// Long variant of enc_andI_reg_immInegpow2.
enc_class enc_andL_reg_immLnegpow2(iRegLdst dst, iRegLsrc src1, immLnegpow2 src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  long src2 = $src2$$constant;
  __ clrrdi(Rdst, Rsrc1, log2_long((jlong)-src2));
%}

// AND with (2^n - 1): implemented as clearing all but the low log2(src2+1)
// bits (clrldi).
enc_class enc_and_reg_immpow2minus1(iRegLdst dst, iRegLsrc src1, immLpow2minus1 src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  long src2 = $src2$$constant;
  __ clrldi(Rdst, Rsrc1, 64-log2_long((((jlong) src2)+1)));
%}

// AND with a single-bit mask: rlwinm keeping exactly the one set bit of src2.
enc_class enc_andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src1, immIpowerOf2 src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  long src2 = $src2$$constant;
  __ rlwinm(Rdst, Rsrc1, 0, (31-log2_long((jlong) src2)) & 0x1f, (31-log2_long((jlong) src2)) & 0x1f);
%}

// Test a single bit and produce 0/1: rotate the tested bit of src1 into the
// least significant position and mask everything else off.
enc_class enc_convI2B_andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src1, immIpowerOf2 src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  long src2 = $src2$$constant;
  __ rlwinm(Rdst, Rsrc1, (32-log2_long((jlong) src2)) & 0x1f, 31, 31);
%}

// First half of materializing the method TOC address: emit the hi16 part.
enc_class enc_load_toc1(iRegLdst dst) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_addis);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);

  __ calculate_address_from_global_toc_hi16only(Rdst, __ method_toc());
%}

// Second half of materializing the method TOC address: emit the lo16 part.
// NOTE(review): $src is declared but not read here — presumably only used to
// express the dependency on the hi16 node; confirm.
enc_class enc_load_toc2(iRegLdst dst, iRegLdst src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_ori);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);

  __ calculate_address_from_global_toc_lo16only(Rdst, __ method_toc());
%}

// Load compressed oop constant (hi part).
enc_class enc_load_con_narrow1(iRegNdst dst, immN src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_addis);
  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  int Csrc = $src$$constant;
  int xc = (Csrc >> 16) & 0xffff;
  int xd = (Csrc >>  0) & 0xffff;
  int value = (int)(short)(xc + ((xd & 0x8000) != 0 ? 1 : 0)); // Compensate sign extend like in patch_set_narrow_oop.
  __ lis(Rdst, value);
%}

// Load compressed oop constant (lo part); records an oop relocation.
enc_class enc_load_con_narrow2(iRegNdst dst, iRegNsrc src1, immN src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_addi);
  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  int Csrc = $src2$$constant;
  assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = __ oop_recorder()->find_index((jobject)$src2$$constant);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  __ relocate(rspec, 1);
  __ addi(Rdst, Rsrc1, (int)(short)(Csrc & 0xffff));
%}

// Convert int to boolean via compare and conditional move:
// dst = (src == 0) ? zero : notzero.
enc_class enc_convI2B_regI__cmove(iRegIdst dst, iRegIsrc src, flagsReg flags, immI16 zero, immI16 notzero) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  ConditionRegister Rcrx = reg_to_ConditionRegister_object($flags$$reg);

  Label done;
  __ cmpwi(Rcrx, Rsrc, 0);
  __ li(Rdst, $zero$$constant);
  __ beq(Rcrx, done);
  __ li(Rdst, $notzero$$constant);
  __ bind(done);
%}

// Pointer variant of enc_convI2B_regI__cmove (64-bit compare).
enc_class enc_convP2B_regP__cmove(iRegIdst dst, iRegPsrc src, flagsReg flags, immI16 zero, immI16 notzero) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  ConditionRegister Rcrx = reg_to_ConditionRegister_object($flags$$reg);

  Label done;
  __ cmpdi(Rcrx, Rsrc, 0);
  __ li(Rdst, $zero$$constant);
  __ beq(Rcrx, done);
  __ li(Rdst, $notzero$$constant);
  __ bind(done);
%}

// Fused (src1 & negpow2) << src3: one rlwinm, or li 0 if the whole value is
// shifted/masked away (maskbits >= 32).
enc_class enc_lShiftI_andI_immInegpow2_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, immU5 src3) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm); // FIXME: assert that rlwinm is equal to addi

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  long src2 = $src2$$constant;
  long src3 = $src3$$constant;
  long maskbits = src3 + log2_long((jlong) (julong) (juint) -src2);
  if (maskbits >= 32) {
    __ li(Rdst, 0); // addi
  } else {
    __ rlwinm(Rdst, Rsrc1, src3 & 0x1f, 0, (31-maskbits) & 0x1f);
  }
%}

// As above, but the shift cancels out (rotate amount 0): only the mask is applied.
enc_class enc_lShiftI_andI_immInegpow2_rShiftI_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, immU5 src3) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm); // FIXME: assert that rlwinm is equal to addi

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  long src2 = $src2$$constant;
  long src3 = $src3$$constant;
  long maskbits = src3 + log2_long((jlong) (julong) (juint) -src2);
  if (maskbits >= 32) {
    __ li(Rdst, 0); // addi
  } else {
    __ rlwinm(Rdst, Rsrc1, 0, 0, (31-maskbits) & 0x1f);
  }
%}

// Zero-extend a 32-bit value to 64 bits by clearing the upper 32 bits.
enc_class enc_zeroextendw(iRegLdst dst, iRegIsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  __ clrldi(Rdst, Rsrc, 32);
%}

// Floating point compare (unordered), setting condition register crx.
enc_class enc_fcmpu(flagsReg crx, regF src1, regF src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_fcmpu);
  MacroAssembler _masm(&cbuf);
  ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
  FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg);
  FloatRegister Rsrc2 = reg_to_FloatRegister_object($src2$$reg);
  __ fcmpu(Rcrx, Rsrc1, Rsrc2);
%}

// Convert floating point to 32-bit integer word, rounding toward zero.
enc_class enc_fctiwz(regF dst, regF src1) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_fctiwz);
  MacroAssembler _masm(&cbuf);
  FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
  FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg);
  __ fctiwz(Rdst, Rsrc1);
%}

// Convert floating point to 64-bit integer doubleword, rounding toward zero.
// NOTE(review): archOpcode comment said ppc64Opcode_fctiwz, but fctidz is
// emitted below; fixed the comment accordingly.
enc_class enc_fctidz(regF dst, regF src1) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_fctidz);
  MacroAssembler _masm(&cbuf);
  FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
  FloatRegister Rsrc1 = reg_to_FloatRegister_object($src1$$reg);
  __ fctidz(Rdst, Rsrc1);
%}

// Register move, elided when source and destination coincide.
enc_class enc_mr_if_needed(iRegIdst dst, iRegIsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_or);
  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  if (Rsrc != Rdst) {
    __ mr(Rdst, Rsrc);
  }
%}

// Floating point register move, elided when source and destination coincide.
enc_class enc_fmr_if_needed(regF dst, regF src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_fmr);
  MacroAssembler _masm(&cbuf);
  FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
  FloatRegister Rsrc = reg_to_FloatRegister_object($src$$reg);
  if (Rsrc != Rdst) {
    __ fmr(Rdst, Rsrc);
  }
%}

// Unconditional branch.
enc_class enc_b(Label lbl) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_b);

  MacroAssembler _masm(&cbuf);
  Label d;   // dummy
  __ bind(d);
  Label* p = ($lbl$$label);
  // `p' is `NULL' when this encoding class is used only to
  // determine the size of the encoded instruction.
  Label& l = (NULL == p)? d : *(p);
  __ b(l);
%}

// Conditional branch; uses static branch-prediction hints when the flag
// UseStaticBranchPredictionForUncommonPathsPPC64 is set and _prob is extreme.
enc_class enc_bc(flagsReg crx, cmpOp cmp, Label lbl) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_bc);

  MacroAssembler _masm(&cbuf);
  Label d;   // dummy
  __ bind(d);
  Label* p = ($lbl$$label);
  // `p' is `NULL' when this encoding class is used only to
  // determine the size of the encoded instruction.
  Label& l = (NULL == p)? d : *(p);
  int cc = $cmp$$cmpcode;
  int flags_reg = $crx$$reg;
  assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
  int bhint = Assembler::bhintNoHint;

  if (UseStaticBranchPredictionForUncommonPathsPPC64) {
    if (_prob <= PROB_NEVER) {
      bhint = Assembler::bhintIsNotTaken;
    } else if (_prob >= PROB_ALWAYS) {
      bhint = Assembler::bhintIsTaken;
    }
  }

  __ bc(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
        cc_to_biint(cc, flags_reg),
        l);
%}

// Conditional far branch (reaches targets outside the 16-bit displacement).
enc_class enc_bc_far(flagsReg crx, cmpOp cmp, Label lbl) %{
  // The scheduler doesn't know about branch shortening, so we set the opcode
  // to ppc64Opcode_bc in order to hide this detail from the scheduler.
  // TODO: PPC port $archOpcode(ppc64Opcode_bc);

  MacroAssembler _masm(&cbuf);
  Label d;   // dummy
  __ bind(d);
  Label* p = ($lbl$$label);
  // `p' is `NULL' when this encoding class is used only to
  // determine the size of the encoded instruction.
  Label& l = (NULL == p)? d : *(p);
  int cc = $cmp$$cmpcode;
  int flags_reg = $crx$$reg;
  int bhint = Assembler::bhintNoHint;

  if (UseStaticBranchPredictionForUncommonPathsPPC64) {
    if (_prob <= PROB_NEVER) {
      bhint = Assembler::bhintIsNotTaken;
    } else if (_prob >= PROB_ALWAYS) {
      bhint = Assembler::bhintIsTaken;
    }
  }

  // Tell the conditional far branch to optimize itself when being relocated.
  __ bc_far(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
            cc_to_biint(cc, flags_reg),
            l,
            MacroAssembler::bc_far_optimize_on_relocate);
%}

// Branch used with Power6 scheduling (can be shortened without changing the node).
enc_class enc_bc_short_far(flagsReg crx, cmpOp cmp, Label lbl) %{
  // The scheduler doesn't know about branch shortening, so we set the opcode
  // to ppc64Opcode_bc in order to hide this detail from the scheduler.
  // TODO: PPC port $archOpcode(ppc64Opcode_bc);

  MacroAssembler _masm(&cbuf);
  Label d;   // dummy
  __ bind(d);
  Label* p = ($lbl$$label);
  // `p' is `NULL' when this encoding class is used only to
  // determine the size of the encoded instruction.
  Label& l = (NULL == p)? d : *(p);
  int cc = $cmp$$cmpcode;
  int flags_reg = $crx$$reg;
  int bhint = Assembler::bhintNoHint;

  if (UseStaticBranchPredictionForUncommonPathsPPC64) {
    if (_prob <= PROB_NEVER) {
      bhint = Assembler::bhintIsNotTaken;
    } else if (_prob >= PROB_ALWAYS) {
      bhint = Assembler::bhintIsTaken;
    }
  }

#if 0 // TODO: PPC port
  if (_size == 8) {
    // Tell the conditional far branch to optimize itself when being relocated.
    __ bc_far(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
              cc_to_biint(cc, flags_reg),
              l,
              MacroAssembler::bc_far_optimize_on_relocate);
  } else {
    __ bc (Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
           cc_to_biint(cc, flags_reg),
           l);
  }
#endif
  Unimplemented();
%}

// Count leading zeros, 32-bit.
enc_class enc_cntlzw(iRegIdst dst, iRegIsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_cntlzw);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  __ cntlzw(Rdst, Rsrc);
%}

// Count leading zeros, 64-bit.
enc_class enc_cntlzd(iRegIdst dst, iRegIsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_cntlzd);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  __ cntlzd(Rdst, Rsrc);
%}

// Load the (sign extended) low 16 bits of an int constant.
enc_class enc_li(iRegIdst dst, immI src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_addi);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  int v = $src$$constant;
  __ li(Rdst, (int)((short) (v & 0xFFFF)));
%}

// Load the high 16 bits of an int constant.
enc_class enc_lis_32(iRegIdst dst, immI src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_addis);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  int v = $src$$constant;
  // Lis sign extends 16-bit src then shifts it 16 bit to the left.
  __ lis(Rdst, (int)((short) ((v & 0xFFFF0000) >> 16)));
%}

// Late expand emitter for loading a long constant from the method's TOC.
enc_class lateExpand_load_long_constant(iRegLdst dst, immL src, iRegPdst toc) %{
  // Create new nodes.
  loadConLNodesTuple loadConLNodes =
    loadConLNodesTuple_create(C, ra_, n_toc, op_src, false, ra_->get_reg_second(this), ra_->get_reg_first(this));

  // Push new nodes.
  if (loadConLNodes._large_hi) nodes->push(loadConLNodes._large_hi);
  if (loadConLNodes._last)     nodes->push(loadConLNodes._last);

  // some asserts
  assert(nodes->length() >= 1, "must have created at least 1 node");
  assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
%}

// Late expand emitter for loading a replicatef float constant from the method's TOC.
enc_class lateExpand_load_replF_constant(iRegLdst dst, immF src, iRegPdst toc) %{
  // Create new nodes.

  // Make an operand with the bit pattern to load as float.
  immLOper *op_repl = new (C) immLOper((jlong)replicate_immF(op_src->constantF()));

  loadConLNodesTuple loadConLNodes =
    loadConLNodesTuple_create(C, ra_, n_toc, op_repl, false, ra_->get_reg_second(this), ra_->get_reg_first(this));

  // Push new nodes.
  if (loadConLNodes._large_hi) nodes->push(loadConLNodes._large_hi);
  if (loadConLNodes._last)     nodes->push(loadConLNodes._last);

  assert(nodes->length() >= 1, "must have created at least 1 node");
  assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
%}

// Load compressed oop constant.
// Late expand into the three-node sequence lis/addi/clear-upper-32 that
// materializes a narrow oop constant; all three nodes share dst's register pair.
enc_class lateExpand_load_conN(iRegNdst dst, immN src) %{
  MachNode *m1 = new (C) loadConN1Node();
  MachNode *m2 = new (C) loadConN2Node();
  MachNode *m3 = new (C) clearMs32bNode();
  m1->add_req(NULL);
  m2->add_req(NULL, m1);
  m3->add_req(NULL, m2);
  m1->_opnds[0] = op_dst;
  m1->_opnds[1] = op_src;
  m2->_opnds[0] = op_dst;
  m2->_opnds[1] = op_dst;
  m2->_opnds[2] = op_src;
  m3->_opnds[0] = op_dst;
  m3->_opnds[1] = op_dst;
  ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
  ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
  ra_->set_pair(m3->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
  nodes->push(m1);
  nodes->push(m2);
  nodes->push(m3);
%}

// Late expand emitter for loading a ptr constant from the method's TOC.
enc_class lateExpand_load_ptr_constant(iRegPdst dst, immP src, iRegPdst toc) %{
  // Create new nodes.
  loadConPNodesTuple loadConPNodes =
    loadConPNodesTuple_create(C, ra_, n_toc, op_src, ra_->get_reg_second(this), ra_->get_reg_first(this));

  // Push new nodes.
  if (loadConPNodes._large_hi) nodes->push(loadConPNodes._large_hi);
  if (loadConPNodes._last)     nodes->push(loadConPNodes._last);

  // some asserts
  assert(nodes->length() >= 1, "must have created at least 1 node");
  assert(loadConPNodes._last->bottom_type()->isa_ptr(), "must be ptr");
%}

// Late expand a float constant load; chooses the compound (large constant
// pool) node variant when needed.
enc_class lateExpand_load_float_constant(regF dst, immF src, iRegPdst toc) %{
  bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;

  MachNode *m2;
  if (large_constant_pool) {
    m2 = new (C) loadConFCompNode();
  } else {
    m2 = new (C) loadConFNode();
  }
  // inputs for new nodes
  m2->add_req(NULL, n_toc);

  // operands for new nodes
  m2->_opnds[0] = op_dst;
  m2->_opnds[1] = op_src;
  m2->_opnds[2] = new (C) iRegPdstOper(); // constanttablebase

  // register allocation for new nodes
  ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
  nodes->push(m2);
%}

// Late expand a double constant load; mirrors lateExpand_load_float_constant.
enc_class lateExpand_load_double_constant(regD dst, immD src, iRegPdst toc) %{
  bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;

  MachNode *m2;
  if (large_constant_pool) {
    m2 = new (C) loadConDCompNode();
  } else {
    m2 = new (C) loadConDNode();
  }
  // inputs for new nodes
  m2->add_req(NULL, n_toc);

  // operands for new nodes
  m2->_opnds[0] = op_dst;
  m2->_opnds[1] = op_src;
  m2->_opnds[2] = new (C) iRegPdstOper(); // constanttablebase

  // register allocation for new nodes
  ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
  nodes->push(m2);
%}

// Emit the addis (hi part) of a large-offset TOC load of a long constant,
// allocating the constant in the TOC and remembering its offset on the node.
enc_class enc_load_long_constL_hi(iRegLdst dst, iRegLdst toc, immL src, immI isoop) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_addis);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rtoc = reg_to_register_object($toc$$reg);
  assert($isoop$$constant == 1 || $isoop$$constant == 0, "must be 1 or 0");

  if (!ra_->C->in_scratch_emit_size()) {
    address const_toc_addr;
    if ($isoop$$constant == 1) {
      // Create an oop constant and a corresponding relocation.
      AddressLiteral a = __ allocate_oop_address((jobject)$src$$constant);
      const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
      __ relocate(a.rspec());
    } else {
      // Create a non-oop constant, no relocation needed.
      const_toc_addr = __ long_constant((jlong)$src$$constant);
    }

    // Get the constant's TOC offset.
    const int toc_offset = __ offset_to_method_toc(const_toc_addr);
    // Store the toc offset of the constant.
    ((loadConL_hiNode*)this)->_const_toc_offset = toc_offset;

    // Also keep the current instruction offset in mind.
    ((loadConL_hiNode*)this)->_cbuf_insts_offset = __ offset();
  }

  __ addis(Rdst, Rtoc, MacroAssembler::largeoffset_si16_si16_hi(_const_toc_offset));
%}

// As enc_load_long_constL_hi, but for a ptr constant.
enc_class enc_load_long_constP_hi(iRegLdst dst, iRegLdst toc, immP src, immI isoop) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_addis);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rtoc = reg_to_register_object($toc$$reg);
  assert($isoop$$constant == 1 || $isoop$$constant == 0, "must be 1 or 0");

  if (!ra_->C->in_scratch_emit_size()) {
    address const_toc_addr;
    if ($isoop$$constant == 1) {
      // Create an oop constant and a corresponding relocation.
      AddressLiteral a = __ allocate_oop_address((jobject)$src$$constant);
      const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
      __ relocate(a.rspec());
    } else {
      // Create a non-oop constant, no relocation needed.
      const_toc_addr = __ long_constant((jlong)$src$$constant);
    }

    // Get the constant's TOC offset.
    const int toc_offset = __ offset_to_method_toc(const_toc_addr);

    // Store the toc offset of the constant.
    ((loadConP_hiNode*)this)->_const_toc_offset = toc_offset;
  }

  __ addis(Rdst, Rtoc, MacroAssembler::largeoffset_si16_si16_hi(_const_toc_offset));
%}

// Emit the ld (lo part) of a large-offset TOC load; reads the offset recorded
// by the matching hi node.
enc_class enc_load_long_const_lo(iRegLdst dst, immL src, iRegLsrc base, immI isoop) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_ld);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rbase = reg_to_register_object($base$$reg);

  int offset = ra_->C->in_scratch_emit_size() ? 0 : this->_const_toc_offset_hi_node->_const_toc_offset;
  __ ld(Rdst, MacroAssembler::largeoffset_si16_si16_lo(offset), Rbase);
%}

// Single-instruction TOC load of a long constant (small offset case).
enc_class enc_load_long_constL(iRegLdst dst, immL src, iRegLdst toc, immI isoop) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_ld);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rtoc = reg_to_register_object($toc$$reg);
  int toc_offset = 0;
  assert($isoop$$constant == 1 || $isoop$$constant == 0, "must be 1 or 0");

  if (!ra_->C->in_scratch_emit_size()) {
    address const_toc_addr;
    if ($isoop$$constant == 1) {
#if 0 // TODO: PPC port
      // Create an oop constant and a corresponding relocation.
      AddressLiteral a = __ allocate_oop_address((jobject)$src$$constant);
      const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
      __ relocate(a.rspec());
#endif
      Unimplemented();
    } else {
      // Create a non-oop constant, no relocation needed.
      const_toc_addr = __ long_constant((jlong)$src$$constant);
    }

    // Get the constant's TOC offset.
    toc_offset = __ offset_to_method_toc(const_toc_addr);

    // Keep the current instruction offset in mind.
    ((loadConLNode*)this)->_cbuf_insts_offset = __ offset();
  }

  __ ld(Rdst, toc_offset, Rtoc);
%}

// As enc_load_long_constL, but for a ptr constant.
// NOTE(review): uses Compile::current() where the sibling encoders use
// ra_->C — presumably equivalent; confirm.
enc_class enc_load_long_constP(iRegLdst dst, immP src, iRegLdst toc, immI isoop) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_ld);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rtoc = reg_to_register_object($toc$$reg);
  int toc_offset = 0;
  assert($isoop$$constant == 1 || $isoop$$constant == 0, "must be 1 or 0");

  if (!Compile::current()->in_scratch_emit_size()) {
    address const_toc_addr;
    if ($isoop$$constant == 1) {
#if 0 // TODO: PPC port
      // Create an oop constant and a corresponding relocation.
      AddressLiteral a = __ allocate_oop_address((jobject)$src$$constant);
      const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
      __ relocate(a.rspec());
#endif
      Unimplemented();
    } else {
      // Create a non-oop constant, no relocation needed.
      const_toc_addr = __ long_constant((jlong)$src$$constant);
    }

    // Get the constant's TOC offset.
    toc_offset = __ offset_to_method_toc(const_toc_addr);
  }

  __ ld(Rdst, toc_offset, Rtoc);
%}

// Load a float constant from the method's TOC (small offset case).
enc_class enc_load_float_const(regF dst, immF src, iRegLdst toc) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_lfs);
  MacroAssembler _masm(&cbuf);
  FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
  address float_address = __ float_constant($src$$constant);
  Register Rtoc;
  Rtoc = reg_to_register_object($toc$$reg);
  __ lfs(Rdst, __ offset_to_method_toc(float_address), Rtoc);
%}

// As enc_load_float_const, but for large constant pool.
enc_class enc_load_float_const_comp(regF dst, immF src, iRegLdst toc) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);
  FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
  address float_address = __ float_constant($src$$constant);
  int offset = __ offset_to_method_toc(float_address);
  Register Rtoc;
  Rtoc = reg_to_register_object($toc$$reg);

  // Split the large offset into hi/lo halves, temporarily bias the TOC
  // register by hi, load, then restore it.
  int hi = (offset + (1<<15))>>16;
  int lo = offset - hi * (1<<16);
  __ addis(Rtoc, Rtoc, hi);
  __ lfs(Rdst, lo, Rtoc);
  __ addis(Rtoc, Rtoc, -hi);
%}

// Load a double constant from the method's TOC (small offset case).
enc_class enc_load_double_const(regD dst, immD src, iRegLdst toc) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_lfd);
  MacroAssembler _masm(&cbuf);
  FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
  address float_address = __ double_constant($src$$constant);
  Register Rtoc;
  Rtoc = reg_to_register_object($toc$$reg);
  __ lfd(Rdst, __ offset_to_method_toc(float_address), Rtoc);
%}

// As enc_load_double_const, but for large constant pool.
enc_class enc_load_double_const_comp(regD dst, immD src, iRegLdst toc) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);
  FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
  address float_address = __ double_constant($src$$constant);
  int offset = __ offset_to_method_toc(float_address);
  Register Rtoc;
  Rtoc = reg_to_register_object($toc$$reg);

  // Same hi/lo biasing scheme as enc_load_float_const_comp.
  int hi = (offset + (1<<15))>>16;
  int lo = offset - hi * (1<<16);
  __ addis(Rtoc, Rtoc, hi);
  __ lfd(Rdst, lo, Rtoc);
  __ addis(Rtoc, Rtoc, -hi);
%}

// Write-prefetch instruction; index register only.
// This instruction is safe to execute with an invalid address.
enc_class enc_mem_store_prefetch(indirect mem, iRegLsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
  MacroAssembler _masm(&cbuf);
  __ dcbtst(reg_to_register_object($src$$reg), reg_to_register_object($mem$$base));
%}

// Write-prefetch without an index register.
enc_class enc_mem_store_prefetch_no_offset(indirect mem) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
  MacroAssembler _masm(&cbuf);
  __ dcbtst(reg_to_register_object($mem$$base));
%}

// Version which sets the content to zero.
// NOTE(review): archOpcode comment says dcbtst but dcbz is emitted — confirm.
enc_class enc_mem_store_prefetch_zero(indirect mem, iRegLsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
  MacroAssembler _masm(&cbuf);
  __ dcbz(reg_to_register_object($src$$reg), reg_to_register_object($mem$$base));
%}

// As above without an index register.
// NOTE(review): archOpcode comment says dcbtst but dcbz is emitted — confirm.
enc_class enc_mem_store_prefetch_zero_no_offset(indirect mem) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
  MacroAssembler _masm(&cbuf);
  __ dcbz(reg_to_register_object($mem$$base));
%}

// Read-prefetch instruction; index register only.
// This instruction is safe to execute with an invalid address.
enc_class enc_mem_load_prefetch(indirect mem, iRegLsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_dcbt);
  MacroAssembler _masm(&cbuf);
  __ dcbt(reg_to_register_object($src$$reg), reg_to_register_object($mem$$base));
%}

// Read-prefetch without an index register.
enc_class enc_mem_load_prefetch_no_offset(indirect mem) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_dcbt);
  MacroAssembler _masm(&cbuf);
  __ dcbt(reg_to_register_object($mem$$base));
%}

// Implicit range checks.
// Implicit range check via trap: le/g against an unsigned 15-bit immediate.
enc_class enc_rangeCheck_le_iReg_uimm15(cmpOp cmp, iRegIsrc src1, uimmI15 src2, Label lbl) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_twi);

  MacroAssembler _masm(&cbuf);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  int Isrc2 = $src2$$constant;
  if ($cmp$$cmpcode == 0x1 /* less_equal */) {
    __ trap_range_check_le(Rsrc1, Isrc2);
  } else {
    // Both successors are uncommon traps, probability is 0.
    // Node got flipped during fixup flow.
    assert($cmp$$cmpcode == 0x9, "must be greater");
    __ trap_range_check_g(Rsrc1, Isrc2);
  }
%}

// Implicit range checks.
enc_class enc_rangeCheck_ge_iReg_iReg(cmpOp cmp, iRegIsrc src1, iRegIsrc src2, Label lbl) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_tw);

  MacroAssembler _masm(&cbuf);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  Register Rsrc2 = reg_to_register_object($src2$$reg);
  if ($cmp$$cmpcode == 0x0 /* greater_equal */) {
    __ trap_range_check_ge(Rsrc1, Rsrc2);
  } else {
    // Both successors are uncommon traps, probability is 0.
    // Node got flipped during fixup flow.
    assert($cmp$$cmpcode == 0x8, "must be less");
    __ trap_range_check_l(Rsrc1, Rsrc2);
  }
%}

// Implicit range checks.
enc_class enc_rangeCheck_ge_iReg_uimm15(cmpOp cmp, iRegIsrc src1, uimmI15 src2, Label lbl) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_twi);

  MacroAssembler _masm(&cbuf);
  Register Rsrc1 = reg_to_register_object($src1$$reg);
  int Isrc2 = $src2$$constant;
  if ($cmp$$cmpcode == 0x0 /* greater_equal */) {
    __ trap_range_check_ge(Rsrc1, Isrc2);
  } else {
    // Both successors are uncommon traps, probability is 0.
    // Node got flipped during fixup flow.
    assert($cmp$$cmpcode == 0x8, "must be less");
    __ trap_range_check_l(Rsrc1, Isrc2);
  }
%}

// Implicit zero checks.
// Implicit null check via trap-on-zero of a pointer value.
enc_class enc_zeroCheckP_eq_iReg(cmpOp cmp, iRegPdst value, immP_0 zero, Label lbl) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_tdi);
  MacroAssembler _masm(&cbuf);
  Register Rvalue = reg_to_register_object($value$$reg);
  if ($cmp$$cmpcode == 0xA) {
    __ trap_null_check(Rvalue);
  } else {
    // Both successors are uncommon traps, probability is 0.
    // Node got flipped during fixup flow.
    assert($cmp$$cmpcode == 0x2 , "must be equal(0xA) or notEqual(0x2)");
    __ trap_null_check(Rvalue, Assembler::traptoGreaterThanUnsigned);
  }
%}

// Method return.
enc_class enc_Ret() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_blr);
  MacroAssembler _masm(&cbuf);
  // LR is restored in MachEpilogNode. Just do the RET here.
  __ blr();
%}

// Encoding class for traceable jumps (TODO: trace).
enc_class enc_form_jmpl(iRegPdstNoScratch dest) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  __ mtctr(as_Register($dest$$reg));
  __ bctr();
%}

// Jump to exception handler, loading the issuing pc from the caller frame
// into R4_ARG2 first.
enc_class enc_jump_set_exception_pc(iRegPdstNoScratch dest) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rdest = reg_to_register_object($dest$$reg);
  __ ld(R4_ARG2/* issuing pc */, _abi(lr), R1_SP);
  __ mtctr(Rdest);
  __ bctr();
%}

// Debug trap for unreachable code paths.
enc_class enc_shouldnotreachhere() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_tdi);

  MacroAssembler _masm(&cbuf);
  __ trap_should_not_reach_here();
%}

// Inlined partial subtype check.
  // Inlined slow path of the subtype check: scans the secondary supers array.
  // Result register is set by check_klass_subtype_slow_path; both temps are
  // clobbered.
  enc_class enc_PartialSubtypeCheck(iRegPdst result, iRegPdst subklass, iRegPdst superklass,
                                    iRegPdst tmp_klass, iRegPdst tmp_arrayptr) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);

    const Register reg_result     = reg_to_register_object($result$$reg);
    const Register reg_subklass   = reg_to_register_object($subklass$$reg);
    const Register reg_superklass = reg_to_register_object($superklass$$reg);
    const Register reg_klass      = reg_to_register_object($tmp_klass$$reg);    // index into cache array
    const Register reg_arrayptr   = reg_to_register_object($tmp_arrayptr$$reg); // current value from cache array

    __ check_klass_subtype_slow_path(reg_subklass, reg_superklass, reg_arrayptr, reg_klass, NULL, reg_result);
  %}

  // Late expand of a known-not-null decodeN into two nodes:
  //   shift (dst = src << oop_shift)  then  add (dst += narrow_oop_base).
  // The null check can be skipped, so no compare/conditional node is needed.
  // Runs after register allocation, so the new nodes inherit this node's
  // register assignment via ra_->set_pair.
  enc_class lateExpand_decode_oop_not_null(iRegPdst dst, iRegNsrc src) %{
    decodeN_shiftNode *n1 = new (C) decodeN_shiftNode();
    n1->add_req(n_region, n_src);
    n1->_opnds[0] = op_dst;
    n1->_opnds[1] = op_src;
    n1->_bottom_type = _bottom_type;

    decodeN_addNode *n2 = new (C) decodeN_addNode();
    n2->add_req(n_region, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_dst;
    n2->_bottom_type = _bottom_type;
    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    // Only the last node of the expansion produces the (GC-visible) oop.
    assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
    ra_->set_oop(n2, true);

    nodes->push(n1);
    nodes->push(n2);
  %}

  // Late expand of a (possibly null) decodeN. The narrow oop must be
  // compared against 0 so that NULL stays NULL after decoding. With isel
  // (Power 7 and later) the base add is done unconditionally and the result
  // is conditionally zeroed; before Power 7 a conditional add is used.
  enc_class lateExpand_decode_oop(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
    decodeN_shiftNode *n_shift = new (C) decodeN_shiftNode();
    cmpN_reg_imm0Node *n_compare = new (C) cmpN_reg_imm0Node();

    n_compare->add_req(n_region, n_src);
    n_compare->_opnds[0] = op_crx;
    n_compare->_opnds[1] = op_src;
    n_compare->_opnds[2] = new (C) immN_0Oper(TypeNarrowOop::NULL_PTR);

    n_shift->add_req(n_region, n_src);
    n_shift->_opnds[0] = op_dst;
    n_shift->_opnds[1] = op_src;
    n_shift->_bottom_type = _bottom_type;

    if (VM_Version::has_isel()) {
      // use isel instruction with Power 7

      decodeN_addNode *n_add_base = new (C) decodeN_addNode();
      n_add_base->add_req(n_region, n_shift);
      n_add_base->_opnds[0] = op_dst;
      n_add_base->_opnds[1] = op_dst;
      n_add_base->_bottom_type = _bottom_type;

      // isel: dst = (src was 0) ? 0 : dst, undoing the base add for NULL.
      cond_set_0_ptrNode *n_cond_set = new (C) cond_set_0_ptrNode();
      n_cond_set->add_req(n_region, n_compare, n_add_base);
      n_cond_set->_opnds[0] = op_dst;
      n_cond_set->_opnds[1] = op_crx;
      n_cond_set->_opnds[2] = op_dst;
      n_cond_set->_bottom_type = _bottom_type;

      // The final node of the chain carries the oop property.
      assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
      ra_->set_oop(n_cond_set, true);

      ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
      ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
      ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
      ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

      nodes->push(n_compare);
      nodes->push(n_shift);
      nodes->push(n_add_base);
      nodes->push(n_cond_set);

    } else {
      // before Power 7
      cond_add_baseNode *n_add_base = new (C) cond_add_baseNode();

      n_add_base->add_req(n_region, n_compare, n_shift);
      n_add_base->_opnds[0] = op_dst;
      n_add_base->_opnds[1] = op_crx;
      n_add_base->_opnds[2] = op_dst;
      n_add_base->_bottom_type = _bottom_type;

      assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
      ra_->set_oop(n_add_base, true);

      ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
      ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
      ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

      nodes->push(n_compare);
      nodes->push(n_shift);
      nodes->push(n_add_base);
    }
  %}

  // Late expand of a known-not-null encodeP into:
  //   sub (dst = src - narrow_oop_base)  then  shift (dst >>= oop_shift).
  // Mirror image of lateExpand_decode_oop_not_null.
  enc_class lateExpand_encode_oop_not_null(iRegNdst dst, iRegPdst src) %{

    encodeP_subNode *n1 = new (C) encodeP_subNode();
    n1->add_req(n_region, n_src);
    n1->_opnds[0] = op_dst;
    n1->_opnds[1] = op_src;
    n1->_bottom_type = _bottom_type;

    encodeP_shiftNode *n2 = new (C) encodeP_shiftNode();
    n2->add_req(n_region, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_dst;
    n2->_bottom_type = _bottom_type;
    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n1);
    nodes->push(n2);
    assert(!(ra_->is_oop(this)), "sanity"); // This is not supposed to be GC'ed.
  %}

  // Late expand of a (possibly null) encodeP. Compares the oop against 0 so
  // NULL encodes to 0. With isel the base subtraction is unconditional and
  // the result conditionally zeroed; before Power 7 the subtraction itself
  // is conditional and the original oop is kept alive in an extra move node
  // until the compare has seen it.
  enc_class lateExpand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{

    if (VM_Version::has_isel()) {
      // use isel instruction with Power 7
      cmpP_reg_imm16Node *n_compare  = new (C) cmpP_reg_imm16Node();
      encodeP_subNode    *n_sub_base = new (C) encodeP_subNode();
      encodeP_shiftNode  *n_shift    = new (C) encodeP_shiftNode();
      cond_set_0_oopNode *n_cond_set = new (C) cond_set_0_oopNode();

      n_compare->add_req(n_region, n_src);
      n_compare->_opnds[0] = op_crx;
      n_compare->_opnds[1] = op_src;
      n_compare->_opnds[2] = new (C) immL16Oper(0);

      n_sub_base->add_req(n_region, n_src);
      n_sub_base->_opnds[0] = op_dst;
      n_sub_base->_opnds[1] = op_src;
      n_sub_base->_bottom_type = _bottom_type;

      n_shift->add_req(n_region, n_sub_base);
      n_shift->_opnds[0] = op_dst;
      n_shift->_opnds[1] = op_dst;
      n_shift->_bottom_type = _bottom_type;

      n_cond_set->add_req(n_region, n_compare, n_shift);
      n_cond_set->_opnds[0] = op_dst;
      n_cond_set->_opnds[1] = op_crx;
      n_cond_set->_opnds[2] = op_dst;
      n_cond_set->_bottom_type = _bottom_type;

      ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
      ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
      ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
      ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

      nodes->push(n_compare);
      nodes->push(n_sub_base);
      nodes->push(n_shift);
      nodes->push(n_cond_set);

    } else {
      // before Power 7
      moveRegNode        *n_move     = new (C) moveRegNode();
      cmpP_reg_imm16Node *n_compare  = new (C) cmpP_reg_imm16Node();
      encodeP_shiftNode  *n_shift    = new (C) encodeP_shiftNode();
      cond_sub_baseNode  *n_sub_base = new (C) cond_sub_baseNode();

      n_move->add_req(n_region, n_src);
      n_move->_opnds[0] = op_dst;
      n_move->_opnds[1] = op_src;
      ra_->set_oop(n_move, true); // Until here, 'n_move' still produces an oop.

      // Precedence edge on n_move keeps the oop-producing copy ordered
      // before the compare clobbers the condition register.
      n_compare->add_req(n_region, n_src);
      n_compare->add_prec(n_move);

      n_compare->_opnds[0] = op_crx;
      n_compare->_opnds[1] = op_src;
      n_compare->_opnds[2] = new (C) immL16Oper(0);

      n_sub_base->add_req(n_region, n_compare, n_src);
      n_sub_base->_opnds[0] = op_dst;
      n_sub_base->_opnds[1] = op_crx;
      n_sub_base->_opnds[2] = op_src;
      n_sub_base->_bottom_type = _bottom_type;

      n_shift->add_req(n_region, n_sub_base);
      n_shift->_opnds[0] = op_dst;
      n_shift->_opnds[1] = op_dst;
      n_shift->_bottom_type = _bottom_type;

      ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
      ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
      ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
      ra_->set_pair(n_move->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

      nodes->push(n_move);
      nodes->push(n_compare);
      nodes->push(n_sub_base);
      nodes->push(n_shift);
    }

    assert(!(ra_->is_oop(this)), "sanity"); // This is not supposed to be GC'ed.
  %}

  // Direct emitter for decodeN: shift, then (if a heap base is in use)
  // conditionally add the base kept in R30, skipping the add for NULL.
  enc_class emit_decode_oop(iRegPdst dst, iRegNsrc src) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    const Register reg_dst = reg_to_register_object($dst$$reg);
    const Register reg_src = reg_to_register_object($src$$reg);
    __ sldi(reg_dst, reg_src, LogMinObjAlignmentInBytes);
    if (Universe::narrow_oop_base() != NULL) {
      Label done;
      __ cmpdi(CCR0, reg_src, 0);
      __ beq(CCR0, done);
      __ add(reg_dst, reg_dst, R30); // R30 holds the narrow oop base.
      __ bind(done);
    }
  %}

  // Late expand emitter for runtime leaf calls.
  // Late expand of a runtime leaf call into: load-of-entry-point (and, for
  // ELFv1, env pointer and TOC loads from the function descriptor), an mtctr
  // node, and the actual CallLeafDirect node which inherits all inputs and
  // bookkeeping fields of the original call node.
  enc_class lateExpand_java_to_runtime_call(method meth) %{
    // The TOC node was stashed in the ReturnAdr input slot.
    Node *toc = in(TypeFunc::ReturnAdr);
    loadConLNodesTuple loadConLNodes_Entry;
#if defined(ABI_ELFv2)
    // ELFv2: entry_point() is the code address itself; no function descriptor.
    jlong entry_address = (jlong) this->entry_point();
    assert(entry_address, "need address here");
    loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, toc, new (C) immLOper(entry_address),
                                                    false, OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
#else
    // Get the struct that describes the function we are about to call.
    FunctionDescriptor* fd = (FunctionDescriptor*) this->entry_point();
    assert(fd, "need fd here");
    jlong entry_address = (jlong) fd->entry();

    // new nodes
    loadConLNodesTuple loadConLNodes_Env;
    loadConLNodesTuple loadConLNodes_Toc;

    // Create nodes and operands for loading the entry point.
    loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, toc, new (C) immLOper(entry_address),
                                                    false, OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));

    // Create nodes and operands for loading the env pointer.
    if (fd->env() != NULL) {
      loadConLNodes_Env = loadConLNodesTuple_create(C, ra_, toc, new (C) immLOper((jlong) fd->env()),
                                                    false, OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
    } else {
      // No env pointer: load a plain 0 into R11 with a short-form constant.
      loadConLNodes_Env._large_hi = NULL;
      loadConLNodes_Env._large_lo = NULL;
      loadConLNodes_Env._small = NULL;
      loadConLNodes_Env._last = new (C) loadConL16Node();
      loadConLNodes_Env._last->_opnds[0] = new (C) iRegLdstOper();
      loadConLNodes_Env._last->_opnds[1] = new (C) immL16Oper(0);
      ra_->set_pair(loadConLNodes_Env._last->_idx, OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
    }

    // Create nodes and operands for loading the Toc point.
    loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, toc, new (C) immLOper((jlong) fd->toc()),
                                                  false, OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
#endif // ABI_ELFv2
    // mtctr node
    MachNode *mtctr = new (C) CallLeafDirect_mtctrNode();
    assert(loadConLNodes_Entry._last != NULL, "entry must exist");
    mtctr->add_req(0, loadConLNodes_Entry._last);

    mtctr->_opnds[0] = new (C) iRegLdstOper();
    mtctr->_opnds[1] = new (C) iRegLdstOper();

    // call node
    MachCallLeafNode *call = new (C) CallLeafDirectNode();

    call->_opnds[0] = _opnds[0];
    call->_opnds[1] = new (C) methodOper((intptr_t) entry_address); // May get set later.

    // Make the new call node look like the old one.
    call->_name = _name;
    call->_tf = _tf;
    call->_entry_point = _entry_point;
    call->_cnt = _cnt;
    call->_argsize = _argsize;
    call->_oop_map = _oop_map;
    call->_jvms = _jvms;
    call->_jvmadj = _jvmadj;
    call->_in_rms = _in_rms;
    call->_nesting = _nesting;

    // New call needs all inputs of old call.
    // Req...
    for (uint i = 0; i < req(); ++i) {
      if (i != TypeFunc::ReturnAdr) {
        call->add_req(in(i));
      } else {
        // put the mtctr where ReturnAdr would be
        call->add_req(mtctr);
      }
    }

    // ...as well as prec
    for (uint i = req(); i < len(); ++i) {
      call->add_prec(in(i));
    }

#if !defined(ABI_ELFv2)
    // ... and more prec.
    if (loadConLNodes_Env._last) call->add_prec(loadConLNodes_Env._last);
    if (loadConLNodes_Toc._last) call->add_prec(loadConLNodes_Toc._last);
#endif

    // registers
    ra_->set1(mtctr->_idx, OptoReg::Name(SR_CTR_num));

    // Insert the new nodes.
    if (loadConLNodes_Entry._large_hi) nodes->push(loadConLNodes_Entry._large_hi);
    if (loadConLNodes_Entry._last) nodes->push(loadConLNodes_Entry._last);
#if !defined(ABI_ELFv2)
    if (loadConLNodes_Env._large_hi) nodes->push(loadConLNodes_Env._large_hi);
    if (loadConLNodes_Env._last) nodes->push(loadConLNodes_Env._last);
    if (loadConLNodes_Toc._large_hi) nodes->push(loadConLNodes_Toc._large_hi);
    if (loadConLNodes_Toc._last) nodes->push(loadConLNodes_Toc._last);
#endif
    nodes->push(mtctr);
    nodes->push(call);
  %}

  // Move to ctr for leaf call.
  enc_class enc_leaf_call_mtctr(iRegLsrc src) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_mtctr);
    MacroAssembler _masm(&cbuf);
    Register Rsrc = reg_to_register_object($src$$reg);
    __ mtctr(Rsrc);
  %}

  // Branch to ctr and link for leaf call.
  enc_class enc_leaf_call(method meth) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_bctrl);
    MacroAssembler _masm(&cbuf);
    __ bctrl();
  %}

  // a runtime call
  enc_class enc_java_to_runtime_call (method meth) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);

    MacroAssembler _masm(&cbuf);
    const address start_pc = __ pc();

#if defined(ABI_ELFv2)
    address entry= !($meth$$method) ? NULL : (address)$meth$$method;
    __ call_c(entry, relocInfo::runtime_call_type);
#else
    // The function we're going to call.
    FunctionDescriptor fdtemp;
    const FunctionDescriptor* fd = !($meth$$method) ? &fdtemp : (FunctionDescriptor*)$meth$$method;

    Register Rtoc = R12_scratch2;
    // Calculate the method's TOC.
    __ calculate_address_from_global_toc(Rtoc, __ method_toc());
    // Put entry, env, toc into the constant pool, this needs up to 3 constant
    // pool entries; call_c_using_toc will optimize the call.
    __ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc);
#endif

    // Check the ret_addr_offset.
    assert(((MachCallRuntimeNode*)this)->ret_addr_offset() == __ last_calls_return_pc() - start_pc,
           "Fix constant in ret_addr_offset()");
  %}

  // A Java static call or a runtime call.
  //
  // Branch-and-link relative to a trampoline.
  // The trampoline loads the target address and does a long branch to there.
  // In case we call java, the trampoline branches to an interpreter_stub
  // which loads the inline cache and the real call target from the constant pool.
  //
  // This basically looks like this:
  //
  // >>>> consts      -+  -+
  //                   |   |- offset1
  // [call target1]    | <-+
  // [IC cache]        |- offset2
  // [call target2] <--+
  //
  // <<<< consts
  // >>>> insts
  //
  // bl offset16               -+  -+             ??? // How many bits available?
  //                            |   |
  // <<<< insts                 |   |
  // >>>> stubs                 |   |
  //                            |   |- trampoline_stub_Reloc
  // trampoline stub:           | <-+
  //   r2 = toc                 |
  //   r2 = [r2 + offset1]      |       // Load call target1 from const section
  //   mtctr r2                 |
  //   bctr                     |- static_stub_Reloc
  // comp_to_interp_stub:   <---+
  //   r1 = toc
  //   ICreg = [r1 + IC_offset]         // Load IC from const section
  //   r1    = [r1 + offset2]           // Load call target2 from const section
  //   mtctr r1
  //   bctr
  //
  // <<<< stubs
  //
  // The call instruction in the code either
  // - Branches directly to a compiled method if the offset is encodable in instruction.
  // - Branches to the trampoline stub if the offset to the compiled method is not encodable.
  // - Branches to the compiled_to_interp stub if the target is interpreted.
  //
  // Further there are three relocations from the loads to the constants in
  // the constant section.
  //
  // Usage of r1 and r2 in the stubs allows to distinguish them.
  // Emit a static Java call (or a call to a runtime wrapper) as a bl with a
  // trampoline stub; see the block comment above for the stub layout.
  // Chooses the relocation type from the call's kind.
  enc_class enc_java_static_call(method meth) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_bl);

    MacroAssembler _masm(&cbuf);
    relocInfo::relocType reloc;

    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      reloc = relocInfo::runtime_call_type;
    } else if (_optimized_virtual) {
      reloc = relocInfo::opt_virtual_call_type;
    } else {
      reloc = relocInfo::static_call_type;
    }

    // Puts 1 constant into the constant pool (call target).
    const EmitCallOffsets offsets = emit_call_with_trampoline_stub(_masm, (address)$meth$$method, reloc);

    // Constant pool or code buffer may have overflowed; bail out.
    if (ra_->C->env()->failing())
      return;

    assert(offsets.insts_call_instruction_offset != -1, "must be initialized");

    if (_method) {
      // Create stub for static call. Puts 1 or 2 constants into constant
      // pool (inline cache, call target).
      emit_java_to_interp_stub(_masm, offsets.insts_call_instruction_offset);
    }
  %}

  // Emit a method handle call.
  //
  // Method handle calls from compiled to compiled are going thru a
  // c2i -> i2c adapter, extending the frame for their arguments. The
  // caller however, returns directly to the compiled callee, that has
  // to cope with the extended frame. We restore the original frame by
  // loading the callers sp and adding the calculated framesize.
  // Method handle call emitter; see the comment above. After the call the
  // caller's SP is reloaded from the backlink and the (possibly c2i-extended)
  // frame is shrunk back to this method's frame size.
  enc_class enc_java_handle_call(method meth) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);

    MacroAssembler _masm(&cbuf);
    relocInfo::relocType reloc;
    assert(_optimized_virtual, "methodHandle call should be a virtual call");
    reloc = relocInfo::opt_virtual_call_type;

    const EmitCallOffsets offsets = emit_call_with_trampoline_stub(_masm, (address)$meth$$method, reloc);
    // restore original sp
    __ ld(R11_scratch1, 0, R1_SP); // load caller sp
    Compile* C = ra_->C;
    const long framesize = C->frame_slots() << LogBytesPerInt;
    unsigned int bytes = (unsigned int)framesize;
    long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
    if (Assembler::is_simm(-offset, 16)) {
      // Frame size fits in a 16-bit immediate.
      __ addi(R1_SP, R11_scratch1, -offset);
    } else {
      __ load_const_optimized(R12_scratch2, -offset);
      __ add(R1_SP, R11_scratch1, R12_scratch2);
    }
#ifdef ASSERT
    __ ld(R12_scratch2, 0, R1_SP); // Load from unextended_sp.
    __ cmpd(CCR0, R11_scratch1, R12_scratch2);
    __ asm_assert_eq("backlink changed", 0x8000);
#endif
    // If fails should store backlink before unextending.

    if (ra_->C->env()->failing())
      return;

    assert(offsets.insts_call_instruction_offset != -1, "must be initialized");

    if (_method) {
      // Create stub for static call.
      emit_java_to_interp_stub(_masm, offsets.insts_call_instruction_offset);
    }
  %}

  // Late expand emitter for virtual calls.
  enc_class lateExpand_java_dynamic_call_sched(method meth) %{
    // Toc is in return address field, though not accessible via lateExpand
    // functionality.
    Node *toc = in(TypeFunc::ReturnAdr);

    // Create the nodes for loading the IC from the TOC.
    loadConLNodesTuple loadConLNodes_IC =
      loadConLNodesTuple_create(C, ra_, toc, new (C) immLOper((jlong)Universe::non_oop_word()),
                                true, OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));

    // Create the call node, copying all bookkeeping fields of the original.
    CallDynamicJavaDirectSchedNode *call = new (C) CallDynamicJavaDirectSchedNode();
    call->_method_handle_invoke = _method_handle_invoke;
    call->_vtable_index = _vtable_index;
    call->_method = _method;
    call->_bci = _bci;
    call->_optimized_virtual = _optimized_virtual;
    call->_tf = _tf;
    call->_entry_point = _entry_point;
    call->_cnt = _cnt;
    call->_argsize = _argsize;
    call->_oop_map = _oop_map;
    call->_jvms = _jvms;
    call->_jvmadj = _jvmadj;
    call->_in_rms = _in_rms;
    call->_nesting = _nesting;

    // New call needs all inputs of old call.
    // Req...
    for (uint i = 0; i < req(); ++i) {
      if (i != TypeFunc::ReturnAdr) {
        call->add_req(in(i));
      } else {
        // The expanded node does not need toc any more.
        call->add_req(C->top());
      }
    }
    // ...as well as prec
    for (uint i = req(); i < len() ; ++i) {
      call->add_prec(in(i));
    }

    // The cache must come before the call, but it's not a req edge.
    call->add_prec(loadConLNodes_IC._last);
    // Remember nodes loading the inline cache into r19.
    call->_load_ic_hi_node = loadConLNodes_IC._large_hi;
    call->_load_ic_node = loadConLNodes_IC._small;

    // Operands for new nodes.
    call->_opnds[0] = _opnds[0];
    call->_opnds[1] = _opnds[1];

    // Only the inline cache is associated with a register.
    assert(Matcher::inline_cache_reg() == OptoReg::Name(R19_num), "ic reg should be R19");

    // Push new nodes.
    if (loadConLNodes_IC._large_hi) nodes->push(loadConLNodes_IC._large_hi);
    if (loadConLNodes_IC._last) nodes->push(loadConLNodes_IC._last);
    nodes->push(call);
  %}

  // Second node of expanded dynamic call - the call.
  enc_class enc_java_dynamic_call_sched(method meth) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_bl);

    MacroAssembler _masm(&cbuf);

    if (!ra_->C->in_scratch_emit_size()) {
      // Create a call trampoline stub for the given method.
      const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method;
      const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
      const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
      emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());

      if (ra_->C->env()->failing())
        return;

      // Build relocation at call site with ic position as data.
      // Exactly one of the two IC-load nodes was created by the sched expand.
      assert((_load_ic_hi_node != NULL && _load_ic_node == NULL) ||
             (_load_ic_hi_node == NULL && _load_ic_node != NULL),
             "must have one, but can't have both");
      assert((_load_ic_hi_node != NULL && _load_ic_hi_node->_cbuf_insts_offset != -1) ||
             (_load_ic_node != NULL && _load_ic_node->_cbuf_insts_offset != -1),
             "must contain instruction offset");
      const int virtual_call_oop_addr_offset = _load_ic_hi_node != NULL
        ? _load_ic_hi_node->_cbuf_insts_offset
        : _load_ic_node->_cbuf_insts_offset;
      const address virtual_call_oop_addr = __ addr_at(virtual_call_oop_addr_offset);
      assert(MacroAssembler::is_load_const_from_method_toc_at(virtual_call_oop_addr),
             "should be load from TOC");

      __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
    }

    // At this point I do not have the address of the trampoline stub,
    // and the entry point might be too far away for bl. Pc() serves
    // as dummy and bl will be patched later.
    __ bl((address) __ pc());
  %}

  // Compound version of call dynamic
  // NOTE(review): body is disabled (#if 0) and ends in Unimplemented();
  // kept for reference until the port completes it.
  enc_class enc_java_dynamic_call(method meth) %{
    // TODO: PPC port // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);
    int start_offset = __ offset();

    Register Rtoc = (ra_) ? as_Register(Matcher::_regEncode[ra_->get_reg_first(in(4))])
                          : R2_TOC;

    int vtable_index = this->_vtable_index;
#if 0 // TODO: PPC port
    if (!methodOopDesc::is_valid_virtual_vtable_index(vtable_index)) {
      // Must be invalid_vtable_index, not nonvirtual_vtable_index.
      assert(vtable_index == methodOopDesc::invalid_vtable_index, "correct sentinel value");
      Register ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
      AddressLiteral oop = __ allocate_oop_address((jobject)Universe::non_oop_word());

      address virtual_call_oop_addr = __ pc();
      __ load_const_from_method_toc(ic_reg, oop, Rtoc);
      // CALL to fixup routine. Fixup routine uses ScopeDesc info
      // to determine who we intended to call.
      __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));

      EmitCallOffsets offsets = { -1, -1 };
      int ret_addr_offset = __ offset();
      offsets = emit_call_with_trampoline_stub(_masm, (address)$meth$$method, relocInfo::none);
      ret_addr_offset += offsets.ret_addr_offset;
      set_lazy_constant(MachCallDynamicJavaNode, ret_addr_offset_no_vtable,
                        ret_addr_offset - start_offset);
      assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == ret_addr_offset - start_offset,
             "Fix constant in ret_addr_offset()");
    } else {
      assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
      // Go thru the vtable. Get receiver klass. Receiver already
      // checked for non-null. If we'll go thru a C2I adapter, the
      // interpreter expects method in R19_method.

      __ load_heap_oop_not_null(R11_scratch1, oopDesc::klass_offset_in_bytes(), R3);

      int entry_offset = instanceKlass::vtable_start_offset()
                         + vtable_index * vtableEntry::size();
      int v_off = entry_offset * wordSize
                  + vtableEntry::method_offset_in_bytes();
      __ li(R19_method, v_off);
      __ ldx(R19_method/*method oop*/,
             R19_method/*method offset*/,
             R11_scratch1/*class*/);
      // NOTE: for vtable dispatches, the vtable entry will never be
      // null. However it may very well end up in handle_wrong_method
      // if the method is abstract for the particular class.
      __ ld(R11_scratch1,
            in_bytes(methodOopDesc::from_compiled_offset()),
            R19_method);
      // Call target. Either compiled code or C2I adapter.
      __ mtctr(R11_scratch1);
      __ bctrl();
      int return_pc_offset = __ offset();
      assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == return_pc_offset - start_offset,
             "Fix constant in ret_addr_offset()");
    }
#endif
    Unimplemented();
  %}

  // Conditional negate: if crx is not 'equal', skip; else dst = -src1.
  // NOTE(review): the neg executes when the branch (bne) falls through.
  enc_class enc_cmove_bne_neg(iRegIdst dst, flagsReg crx, iRegIsrc src1) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);

    Label done;
    __ bne(Rcrx, done);
    __ neg(Rdst, Rsrc1);

#if 0 // TODO: PPC port
    if (_size == 12) {
      // Bundler will have changed our size to 8 if stop is not required.
      __ endgroup();
    }
#endif
    if (false /* TODO: PPC PORT ra_->C->do_hb_scheduling()*/) Unimplemented();

    __ bind(done);
  %}

  // 32-bit signed integer division.
  enc_class enc_divw(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_divw);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    __ divw(Rdst, Rsrc1, Rsrc2);
  %}

  // 64-bit signed integer division.
  enc_class enc_divd(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_divd);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    Register Rsrc1 = reg_to_register_object($src1$$reg);
    Register Rsrc2 = reg_to_register_object($src2$$reg);
    __ divd(Rdst, Rsrc1, Rsrc2);
  %}

  // Inline fast-path monitor enter; all three temps are clobbered.
  enc_class enc_compiler_fast_lock(flagsReg pcc_arg, iRegPdst oop_arg, iRegPdst box_arg, iRegPdst tmp1,
                                   iRegPdst tmp2, iRegPdst tmp3) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);

    Register displaced_header = reg_to_register_object($tmp1$$reg);
    Register current_header = reg_to_register_object($tmp2$$reg);
    Register temp = reg_to_register_object($tmp3$$reg);
    Register box = reg_to_register_object($box_arg$$reg);
    Register oop = reg_to_register_object($oop_arg$$reg);
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($pcc_arg$$reg);

    __ compiler_fast_lock_object(Rcrx, oop, box, temp, displaced_header, current_header);

    // If locking was successful, Rcrx should indicate 'EQ'.
    // The compiler generates a branch to the runtime call to
    // _complete_monitor_locking_Java for the case where Rcrx is 'NE'.
  %}

  // Inline fast-path monitor exit; mirror of enc_compiler_fast_lock.
  enc_class enc_compiler_fast_unlock(flagsReg pcc_arg, iRegPdst oop_arg, iRegPdst box_arg, iRegPdst tmp1,
                                     iRegPdst tmp2, iRegPdst tmp3) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
    MacroAssembler _masm(&cbuf);

    Register displaced_header = reg_to_register_object($tmp1$$reg);
    Register current_header = reg_to_register_object($tmp2$$reg);
    Register temp = reg_to_register_object($tmp3$$reg);
    Register box = reg_to_register_object($box_arg$$reg);
    Register oop = reg_to_register_object($oop_arg$$reg);
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($pcc_arg$$reg);

    __ compiler_fast_unlock_object(Rcrx, oop, box, temp, displaced_header, current_header);

    // If unlocking was successful, Rcrx should indicate 'EQ'.
    // The compiler generates a branch to the runtime call to
    // _complete_monitor_unlocking_Java for the case where Rcrx is 'NE'.
  %}

  // Fixup after a float compare: if the result was 'unordered' (NaN),
  // rewrite crx to read as 'less'.
  enc_class enc_cmove_bns_less(flagsReg crx) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmovecr);

    MacroAssembler _masm(&cbuf);
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
    Label done;
    __ bns(Rcrx, done);        // not unordered -> keep crx
    __ li(R0, 0);
    __ cmpwi(Rcrx, R0, 1);     // unordered -> set crx to 'less'
#if 0 // TODO: PPC port
    if (_size == 16) {
      // Bundler will have changed our size if stop is not required
      __ endgroup();
    }
#endif
    if (false /* TODO: PPC PORT ra_->C->do_hb_scheduling()*/) Unimplemented();
    __ bind(done);
  %}

  enc_class lateExpand_cmpF_reg_reg(flagsReg crx, regF src1, regF src2) %{
    //
    // replaces
    //
    //   region  src1  src2
    //    \       |     |
    //     crx=cmpF_reg_reg
    //
    // with
    //
    //   region  src1  src2
    //    \       |     |
    //     crx=cmpFUnordered_reg_reg
    //      |
    //      ^  region
    //      |   \
    //     crx=cmov_bns_less
    //

    // Create new nodes.
    MachNode *m1 = new (C) cmpFUnordered_reg_regNode();
    MachNode *m2 = new (C) cmov_bns_lessNode();

    // inputs for new nodes
    m1->add_req(n_region, n_src1, n_src2);
    m2->add_req(n_region);

    // precedences for new nodes
    m2->add_prec(m1);

    // operands for new nodes
    m1->_opnds[0] = op_crx;
    m1->_opnds[1] = op_src1;
    m1->_opnds[2] = op_src2;

    m2->_opnds[0] = op_crx;

    // registers for new nodes
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx

    // Insert new nodes.
    nodes->push(m1);
    nodes->push(m2);
  %}

  enc_class lateExpand_cmpD_reg_reg(flagsReg crx, regD src1, regD src2) %{
    //
    // replaces
    //
    //   region  src1  src2
    //    \       |     |
    //     crx=cmpD_reg_reg
    //
    // with
    //
    //   region  src1  src2
    //    \       |     |
    //     crx=cmpDUnordered_reg_reg
    //      |
    //      ^  region
    //      |   \
    //     crx=cmov_bns_less
    //

    // create new nodes
    MachNode *m1 = new (C) cmpDUnordered_reg_regNode();
    MachNode *m2 = new (C) cmov_bns_lessNode();

    // inputs for new nodes
    m1->add_req(n_region, n_src1, n_src2);
    m2->add_req(n_region);

    // precedences for new nodes
    m2->add_prec(m1);

    // operands for new nodes
    m1->_opnds[0] = op_crx;
    m1->_opnds[1] = op_src1;
    m1->_opnds[2] = op_src2;

    m2->_opnds[0] = op_crx;

    // registers for new nodes
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx

    // Insert new nodes.
    nodes->push(m1);
    nodes->push(m2);
  %}

  enc_class lateExpand_cmovI_conIvalueMinus1_conIvalue0_conIvalue1(iRegIdst dst, flagsReg crx) %{
    //
    // replaces
    //
    //   region  crx
    //    \       |
    //     dst=cmovI_conIvalueMinus1_conIvalue0_conIvalue1
    //
    // with
    //
    //   region
    //    \
    //     dst=loadConI16(0)
    //      |
    //      ^  region  crx
    //      |   \       |
    //     dst=cmovI_conIvalueMinus1_conIvalue1
    //

    // Create new nodes.
    MachNode *m1 = new (C) loadConI16Node();
    MachNode *m2 = new (C) cmovI_conIvalueMinus1_conIvalue1Node();

    // inputs for new nodes
    m1->add_req(n_region);
    m2->add_req(n_region, n_crx);

    // precedences for new nodes
    m2->add_prec(m1);

    // operands for new nodes
    m1->_opnds[0] = op_dst;
    m1->_opnds[1] = new (C) immI16Oper(0);

    m2->_opnds[0] = op_dst;
    m2->_opnds[1] = op_crx;

    // registers for new nodes
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst

    // Insert new nodes.
    nodes->push(m1);
    nodes->push(m2);
  %}

  // Three-way compare result: dst already holds 0 (equal); overwrite with
  // +1 for greater, -1 for less-or-unordered.
  enc_class enc_cmovI_conIvalueMinus1_conIvalue1(iRegIdst dst, flagsReg crx) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);

    MacroAssembler _masm(&cbuf);
    Register Rdst = reg_to_register_object($dst$$reg);
    ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
    Label done;
    // li(Rdst, 0);              // equal -> 0
    __ beq(Rcrx, done);
    __ li(Rdst, 1);              // greater -> +1
    __ bgt(Rcrx, done);
    __ li(Rdst, -1);             // unordered or less -> -1
#if 0 // TODO: PPC port
    if (_size == 20) {
      // Bundler will have changed our size to 16 if stop is not required.
      __ endgroup();
    }
#endif
    if (false /* TODO: PPC PORT ra_->C->do_hb_scheduling()*/) Unimplemented();
    __ bind(done);
  %}

  // Placeholder for instructions that must never be encoded directly.
  enc_class ShouldNotEncodeThis () %{
    ShouldNotCallThis();
  %}

  // ClearArray loop.
  // The inlineCallClearArray instruction enforces an alignment.
  // Compiler ensures base is doubleword aligned and cnt is count of doublewords.
  enc_class enc_ClearArray_reg_reg(Register cnt, Register base) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);

    MacroAssembler _masm(&cbuf);
    Register cnt_dwords_reg = reg_to_register_object($cnt$$reg);
    Register base_ptr_reg   = reg_to_register_object($base$$reg);

    __ clear_memory_doubleword(base_ptr_reg, cnt_dwords_reg); // kills cnt, base, R0
  %}

  // Doubleword-wise disjoint raw memory copy: CTR-counted loop copying
  // 'cnt' doublewords from src to dst via R0. tmp1 is the running byte
  // index; tmp2 is declared but unused here.
  enc_class enc_CopyRawMemoryXAlignedDisjoint_regP_regP_regL(iRegPdst src, iRegPdst dst, iRegLdst cnt,
                                                             iRegPdst tmp1, iRegPdst tmp2) %{
    // TODO: PPC port $archOpcode(ppc64Opcode_compound);

    MacroAssembler _masm(&cbuf);

    Register src_reg  = reg_to_register_object($src$$reg);
    Register dst_reg  = reg_to_register_object($dst$$reg);
    Register cnt_reg  = reg_to_register_object($cnt$$reg);
    Register tmp1_reg = reg_to_register_object($tmp1$$reg);
    Register tmp2_reg = reg_to_register_object($tmp2$$reg);

    Label loop;
    Label done;

    // Nothing to do for a zero count.
    __ cmpdi(CCR0, cnt_reg, 0);
    __ beq(CCR0, done);

    __ load_const_optimized(tmp1_reg, 0L); // init index reg
    __ mtctr(cnt_reg);

    __ bind(loop);
    __ ldx(R0, src_reg, tmp1_reg);
    __ stdx(R0, dst_reg, tmp1_reg);
    __ addi(tmp1_reg, tmp1_reg, 8);
    __ bdnz(loop);
    __ bind(done);
  %}

  // Precompute the needle if constant here, as node is available.
// Emitter: String.indexOf where the needle is a single char known at compile
// time (needleImm is a constant char[] oop, offsetImm its first-char offset).
enc_class enc_String_IndexOf_imm1_char(iRegIdst result, iRegPsrc haystack, iRegIdst haycnt, immP needleImm, immL offsetImm,
                                       iRegIdst tmp1, iRegIdst tmp2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);

  // Extract the constant char[] the needle operand refers to.
  immPOper *needleOper = (immPOper *)$needleImm;
  const TypeOopPtr *t = needleOper->type()->isa_oopptr();
  ciTypeArray* needle_values = t->const_oop()->as_type_array(); // Pointer to live char *
  assert(needle_values, "");

  // With compressed oops the first char must live at offset 16 in the array.
  immLOper *offsetOper = (immLOper *)$offsetImm;
  long off = offsetOper->constant();
  guarantee(!UseCompressedOops || off == 16, "not beginning of char[]");

  __ string_indexof_1($result$$Register,
                      $haystack$$Register, $haycnt$$Register,
                      R0, needle_values->char_at(0),
                      $tmp1$$Register, $tmp2$$Register);
%}

// Precompute the needle if constant here, as node is available.
// Emitter: String.indexOf with a one-char needle held in a register operand
// whose defining node may carry a constant array type.
enc_class enc_String_IndexOf_imm1(iRegIdst result, iRegPsrc haystack, iRegIdst haycnt, iRegPsrc needle,
                                  iRegIdst tmp1, iRegIdst tmp2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);

  Node *ndl = in(operand_index($needle)); // The node that defines needle.
  ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
  guarantee(needle_values, "sanity");
  // NOTE(review): the guarantee above makes the else branch below dead code;
  // either the guarantee or the runtime fallback looks redundant — confirm intent.
  if (needle_values != NULL) {
    // Constant needle: pass the char value directly, needle register unused (R0).
    __ string_indexof_1($result$$Register,
                        $haystack$$Register, $haycnt$$Register,
                        R0, needle_values->char_at(0),
                        $tmp1$$Register, $tmp2$$Register);
  } else {
    // Non-constant needle: pass the needle register, char value 0.
    __ string_indexof_1($result$$Register,
                        $haystack$$Register, $haycnt$$Register,
                        $needle$$Register, 0,
                        $tmp1$$Register, $tmp2$$Register);
  }
%}

// Emitter: String.indexOf with a constant (uimm16) needle length; the needle
// contents are looked up from the defining node when they are constant, too.
enc_class enc_String_IndexOf_imm(iRegIdst result, iRegPsrc haystack, iRegIdst haycnt, iRegPsrc needle, uimmI16 needlecntImm,
                                 iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);
  int needlecnt = $needlecntImm$$constant;
  Node *ndl = in(operand_index($needle)); // The node that defines needle.
  ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();

  __ string_indexof($result$$Register,
                    $haystack$$Register, $haycnt$$Register,
                    $needle$$Register, needle_values, $tmp5$$Register, needlecnt,
                    $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
%}

// Emitter: fully general String.indexOf — needle contents and length are
// only known at runtime (NULL values, constant count 0).
enc_class enc_String_IndexOf(iRegIsrc result, iRegPsrc haystack, iRegIsrc haycnt, iRegPsrc needle, iRegIsrc needlecnt,
                             iRegIdst tmp1, iRegIsrc tmp2, iRegIsrc tmp3, iRegIsrc tmp4) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);
  __ string_indexof($result$$Register,
                    $haystack$$Register, $haycnt$$Register,
                    $needle$$Register, NULL, $needlecnt$$Register, 0, // needlecnt not constant.
                    $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
%}

// Emitter: char-array equality with a compile-time constant element count.
enc_class enc_String_EqualsImm(iRegPsrc str1, iRegPsrc str2, uimmI15 cntImm, iRegIdst result,
                               iRegPdst tmp1, iRegPdst tmp2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  int cntval = $cntImm$$constant;
  MacroAssembler _masm(&cbuf);
  __ char_arrays_equalsImm($str1$$Register, $str2$$Register, cntval, $result$$Register,
                           $tmp1$$Register, $tmp2$$Register);
%}

// Emitter: char-array equality with a runtime element count.
enc_class enc_String_Equals(iRegPsrc str1, iRegPsrc str2, iRegIsrc cnt, iRegIdst result,
                            iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, iRegPdst tmp4, iRegPdst tmp5) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);
  __ char_arrays_equals($str1$$Register, $str2$$Register, $cnt$$Register, $result$$Register,
                        $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, $tmp5$$Register);
%}

// Emitter: String.compareTo; note the fixed argument registers (rarg1..rarg4).
enc_class enc_String_Compare(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
                             iRegPdst tmp) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);
  __ string_compare($str1$$Register, $str2$$Register, $cnt1$$Register, $cnt2$$Register,
                    $result$$Register, $tmp$$Register);
%}

// Emitter: conditional load from a stack slot — the load is skipped when the
// summary-overflow bit of crx is set (branch via bso).
enc_class enc_cmove_bso_stackSlotL(iRegLdst dst, flagsReg crx, stackSlotL mem) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_cmove);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
  Register Rbase = reg_to_register_object($mem$$base);
  int Idisp = $mem$$disp;
  // Base register 1 is the SP; a frame bias would apply here once ported.
  if ($mem$$base == 1) {
    Idisp += 0 /* TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes()*/;
  }
  Label done;
  __ bso(Rcrx, done);
  __ ld(Rdst, Idisp, Rbase);
#if 0 // TODO: PPC port
  if (_size == 12) {
    // Bundler will have changed our size to 8 if stop is not required.
    __ endgroup();
  }
#endif
  if (false /* TODO: PPC PORT ra_->C->do_hb_scheduling()*/) Unimplemented();
  __ bind(done);
%}

// Late-expand: split the pseudo node cmovI_bso_stackSlotL_conLvalue0 into a
// constant-0 load plus a conditional stack-slot load (see diagram below).
enc_class lateExpand_cmovI_bso_stackSlotL_conLValue0(iRegIdst dst, flagsReg crx, stackSlotL mem) %{
  //
  // replaces
  //
  //   region  dst  crx  mem
  //    \       |    |   /
  //     dst=cmovI_bso_stackSlotL_conLvalue0
  //
  // with
  //
  //   region  dst
  //    \       /
  //     dst=loadConI16(0)
  //      |
  //      ^  region  dst  crx  mem
  //      |   \       |    |    /
  //      dst=cmovI_bso_stackSlotL
  //

  // Create new nodes.
  MachNode *m1 = new (C) loadConI16Node();
  MachNode *m2 = new (C) cmovI_bso_stackSlotLNode();

  // inputs for new nodes
  m1->add_req(n_region);
  m2->add_req(n_region, n_crx, n_mem);

  // precedences for new nodes
  m2->add_prec(m1);

  // operands for new nodes
  m1->_opnds[0] = op_dst;
  m1->_opnds[1] = new (C) immI16Oper(0);

  m2->_opnds[0] = op_dst;
  m2->_opnds[1] = op_crx;
  m2->_opnds[2] = op_mem;

  // registers for new nodes
  ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
  ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst

  // Insert new nodes.
  nodes->push(m1);
  nodes->push(m2);
%}

// Late-expand: long variant of the node split above (loadConL16 + cmovL).
enc_class lateExpand_cmovL_bso_stackSlotL_conLvalue0(iRegLdst dst, flagsReg crx, stackSlotL mem) %{
  //
  // replaces
  //
  //   region  dst  crx  mem
  //    \       |    |   /
  //     dst=cmovL_bso_stackSlotL_conLvalue0
  //
  // with
  //
  //   region  dst
  //    \       /
  //     dst=loadConL16(0)
  //      |
  //      ^  region  dst  crx  mem
  //      |   \       |    |    /
  //      dst=cmovL_bso_stackSlotL
  //

  // Create new nodes.
  MachNode *m1 = new (C) loadConL16Node();
  MachNode *m2 = new (C) cmovL_bso_stackSlotLNode();

  // inputs for new nodes
  m1->add_req(n_region);
  m2->add_req(n_region, n_crx, n_mem);

  // precedences for new nodes
  m2->add_prec(m1);

  // operands for new nodes
  m1->_opnds[0] = op_dst;
  m1->_opnds[1] = new (C) immL16Oper(0);

  m2->_opnds[0] = op_dst;
  m2->_opnds[1] = op_crx;
  m2->_opnds[2] = op_mem;

  // registers for new nodes
  ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
  ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst

  // Insert new nodes.
  nodes->push(m1);
  nodes->push(m2);
%}

// Emitter: conditional register move — dst = src iff (cmp crx) holds,
// implemented as an inverted branch around a single mr.
enc_class enc_cmove_reg(iRegIdst dst, flagsReg crx, iRegIsrc src, cmpOp cmp) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_cmove);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  int cc = $cmp$$cmpcode;
  int flags_reg = $crx$$reg;
  Label done;
  assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
  // Branch if not (cmp crx).
  __ bc(cc_to_inverse_boint(cc), cc_to_biint(cc, flags_reg), done);
  __ mr(Rdst, Rsrc);
#if 0 // TODO: PPC port
  if (_size == 12) {
    // Bundler will have changed our size to 8 if stop is not required.
    __ endgroup();
  }
#endif
  if (false /* TODO: PPC PORT ra_->C->do_hb_scheduling()*/) Unimplemented();
  __ bind(done);
%}

// isel for cmove
// Emitter: branchless cmove using the Power7 isel instruction; bit 3 of the
// cmpcode selects the inverted form.
enc_class enc_isel(iRegIdst dst, flagsReg crx, iRegIsrc src, cmpOp cmp) %{
  // This is a Power7 instruction for which no machine description
  // exists. Anyways, the scheduler should be off on Power7.
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  ConditionRegister Rc = reg_to_ConditionRegister_object($crx$$reg);
  int cc = $cmp$$cmpcode;
  __ isel(Rdst, Rc, (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), Rsrc);
%}

// Set dst = 0 if crx is equal, otherwise dst = src
enc_class enc_isel_set_to_0_or_value(iRegPdst dst, flagsReg crx, iRegPsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound); // see above

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);

  __ isel_0(Rdst, Rcrx, Assembler::equal, Rsrc);
%}

// Emitter: if crx is not-equal, dst = src + R30; otherwise dst is untouched.
// NOTE(review): assumes R30 holds a base value (presumably the heap base) —
// confirm against the matching instruct definition.
enc_class enc_cond_add_base(iRegPdst dst, flagsReg crx, iRegPsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_cmove);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
  Label done;

  __ beq(Rcrx, done);
  __ add(Rdst, Rsrc, R30);
#if 0 // TODO: PPC port
  if (_size == 12) {
    // Bundler will have changed our size to 8 if stop is not required.
    __ endgroup();
  }
#endif
  if (false /* TODO: PPC PORT ra_->C->do_hb_scheduling()*/) Unimplemented();
  __ bind(done);
%}

// Emitter: counterpart of enc_cond_add_base — conditionally subtracts R30.
enc_class enc_cond_sub_base(iRegPdst dst, flagsReg crx, iRegPsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_cmove);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  ConditionRegister Rcrx = reg_to_ConditionRegister_object($crx$$reg);
  Label done;

  __ beq(Rcrx, done);
  __ sub(Rdst, Rsrc, R30);
#if 0 // TODO: PPC port
  if (_size == 12) {
    // Bundler will have changed our size to 8 if stop is not required.
    __ endgroup();
  }
#endif
  if (false /* TODO: PPC PORT ra_->C->do_hb_scheduling()*/) Unimplemented();
  __ bind(done);
%}

// Emitter: conditional load-immediate — dst = src (simm16) iff (cmp crx) holds.
enc_class enc_cmove_imm(iRegIdst dst, flagsReg crx, immI16 src, cmpOp cmp) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_cmove);

  MacroAssembler _masm(&cbuf);
  Register Rdst = reg_to_register_object($dst$$reg);
  int Csrc = $src$$constant;
  int cc = $cmp$$cmpcode;
  int flags_reg = $crx$$reg;
  Label done;
  assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
  // Branch if not (cmp crx).
  __ bc(cc_to_inverse_boint(cc), cc_to_biint(cc, flags_reg), done);
  __ li(Rdst, Csrc);
#if 0 // TODO: PPC port
  if (_size == 12) {
    // Bundler will have changed our size to 8 if stop is not required.
    __ endgroup();
  }
#endif
  if (false /* TODO: PPC PORT ra_->C->do_hb_scheduling()*/) Unimplemented();
  __ bind(done);
%}

// Emitter: float-register cmove — dst = src (fmr) iff (cmp crx) holds.
enc_class enc_cmovef_reg(regF dst, flagsReg crx, regF src, cmpOp cmp) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_cmovef);

  MacroAssembler _masm(&cbuf);
  FloatRegister Rdst = reg_to_FloatRegister_object($dst$$reg);
  FloatRegister Rsrc = reg_to_FloatRegister_object($src$$reg);
  int cc = $cmp$$cmpcode;
  int flags_reg = $crx$$reg;
  Label done;
  assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
  // Branch if not (cmp crx).
  __ bc(cc_to_inverse_boint(cc), cc_to_biint(cc, flags_reg), done);
  __ fmr(Rdst, Rsrc);
#if 0 // TODO: PPC port
  if (_size == 12) {
    // Bundler will have changed our size to 8 if stop is not required
    __ endgroup();
  }
#endif
  if (false /* TODO: PPC PORT ra_->C->do_hb_scheduling()*/) Unimplemented();
  __ bind(done);
%}

// Emitter: 32-bit compare-and-swap; res receives the success flag.
enc_class enc_compareAndSwapI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rdummy = R0;
  Register Rres = reg_to_register_object($res$$reg);
  Register Rold = reg_to_register_object($src1$$reg);
  Register Rnew = reg_to_register_object($src2$$reg);
  Register Rptr = reg_to_register_object($mem_ptr$$reg);

  // CmpxchgX sets CCR0 to cmpX(Rval, Rcomp) and Rres to 'true'/'false'.
  __ cmpxchgw(CCR0, Rdummy, Rold, Rnew, Rptr, MacroAssembler::MemBarNone,
              MacroAssembler::cmpxchgx_hint_atomic_update(), Rres, true);
%}

// Emitter: compare-and-swap on a narrow (compressed) oop; same 32-bit
// cmpxchgw as enc_compareAndSwapI since narrow oops are word-sized.
enc_class enc_compareAndSwapN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rdummy = R0;
  Register Rres = reg_to_register_object($res$$reg);
  Register Rold = reg_to_register_object($src1$$reg);
  Register Rnew = reg_to_register_object($src2$$reg);
  Register Rptr = reg_to_register_object($mem_ptr$$reg);

  // CmpxchgX sets CCR0 to cmpX(Rval, Rcomp) and Rres to 'true'/'false'.
  __ cmpxchgw(CCR0, Rdummy, Rold, Rnew, Rptr, MacroAssembler::MemBarNone,
              MacroAssembler::cmpxchgx_hint_atomic_update(), Rres, true);
%}

// As enc_compareAndSwapLP, but return flag register instead of boolean value in
// int register.
enc_class enc_storeConditionalLP(flagsReg crx, iRegPdst mem_ptr, iRegLsrc oldVal, iRegLsrc newVal) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  ConditionRegister flag = reg_to_ConditionRegister_object($crx$$reg);
  Register Rdummy = R0;
  Register Rres = R0;
  Register Rold = reg_to_register_object($oldVal$$reg);
  Register Rnew = reg_to_register_object($newVal$$reg);
  Register Rptr = reg_to_register_object($mem_ptr$$reg);
  __ cmpxchgd(flag, Rdummy, Rold, Rnew, Rptr, MacroAssembler::MemBarNone,
              MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, NULL, true);
%}

// Emitter: 64-bit (long/pointer) compare-and-swap; res receives the success flag.
enc_class enc_compareAndSwapLP(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rdummy = R0;
  Register Rres = reg_to_register_object($res$$reg);
  Register Rold = reg_to_register_object($src1$$reg);
  Register Rnew = reg_to_register_object($src2$$reg);
  Register Rptr = reg_to_register_object($mem_ptr$$reg);

  // CmpxchgX sets CCR0 to cmpX(Rval, Rcomp) and Rres to 'true'/'false'.
  __ cmpxchgd(CCR0, Rdummy, Rold, Rnew, Rptr, MacroAssembler::MemBarNone,
              MacroAssembler::cmpxchgx_hint_atomic_update(), Rres, NULL, true);
%}

// Emitter: atomic fetch-and-add (32-bit) via lwarx/stwcx. retry loop.
// Returns the old value in res.
enc_class enc_GetAndAddI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rtmp = R0;
  Register Rres = reg_to_register_object($res$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  Register Rptr = reg_to_register_object($mem_ptr$$reg);
  // If res aliases an input, load the old value into Rtmp instead and
  // reconstruct it afterwards.
  bool RegCollision = (Rres == Rsrc) || (Rres == Rptr);
  Register Rold = RegCollision ? Rtmp : Rres;

  Label Lretry;
  __ bind(Lretry);
  __ lwarx(Rold, Rptr, /*hint*/ false);
  __ add(Rtmp, Rsrc, Rold);
  __ stwcx_(Rtmp, Rptr);
  __ bne(CCR0, Lretry);
  // Rtmp = Rsrc + Rold, so Rtmp - Rsrc recovers the old value.
  if (RegCollision) __ subf(Rres, Rsrc, Rtmp);
%}

// Emitter: atomic fetch-and-add (64-bit) via ldarx/stdcx. retry loop.
enc_class enc_GetAndAddL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rtmp = R0;
  Register Rres = reg_to_register_object($res$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  Register Rptr = reg_to_register_object($mem_ptr$$reg);
  bool RegCollision = (Rres == Rsrc) || (Rres == Rptr);
  Register Rold = RegCollision ? Rtmp : Rres;

  Label Lretry;
  __ bind(Lretry);
  __ ldarx(Rold, Rptr, /*hint*/ false);
  __ add(Rtmp, Rsrc, Rold);
  __ stdcx_(Rtmp, Rptr);
  __ bne(CCR0, Lretry);
  if (RegCollision) __ subf(Rres, Rsrc, Rtmp);
%}

// Emitter: atomic exchange (32-bit) — stores src, returns the old value in res.
enc_class enc_GetAndSetI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rtmp = R0;
  Register Rres = reg_to_register_object($res$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  Register Rptr = reg_to_register_object($mem_ptr$$reg);
  bool RegCollision = (Rres == Rsrc) || (Rres == Rptr);
  Register Rold = RegCollision ? Rtmp : Rres;

  Label Lretry;
  __ bind(Lretry);
  __ lwarx(Rold, Rptr, /*hint*/ false);
  __ stwcx_(Rsrc, Rptr);
  __ bne(CCR0, Lretry);
  if (RegCollision) __ mr(Rres, Rtmp);
%}

// Emitter: atomic exchange (64-bit).
enc_class enc_GetAndSetL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);

  MacroAssembler _masm(&cbuf);
  Register Rtmp = R0;
  Register Rres = reg_to_register_object($res$$reg);
  Register Rsrc = reg_to_register_object($src$$reg);
  Register Rptr = reg_to_register_object($mem_ptr$$reg);
  bool RegCollision = (Rres == Rsrc) || (Rres == Rptr);
  Register Rold = RegCollision ? Rtmp : Rres;

  Label Lretry;
  __ bind(Lretry);
  __ ldarx(Rold, Rptr, /*hint*/ false);
  __ stdcx_(Rsrc, Rptr);
  __ bne(CCR0, Lretry);
  if (RegCollision) __ mr(Rres, Rtmp);
%}

// End a group.
enc_class enc_end_bundle() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_endgroup);
  MacroAssembler _masm(&cbuf);
  __ endgroup();
%}

// Nop emitters.
// Nop occupying a floating-point issue slot.
enc_class enc_fxnop() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_fmr);
  MacroAssembler _masm(&cbuf);
  __ nop();
%}

enc_class enc_fpnop0() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_fmr);
  MacroAssembler _masm(&cbuf);
  __ fpnop0();
%}

enc_class enc_fpnop1() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_fmr);
  MacroAssembler _masm(&cbuf);
  __ fpnop1();
%}

// Nops occupying a branch/CR issue slot.
enc_class enc_brnop0() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_mcrf);
  MacroAssembler _masm(&cbuf);
  __ brnop0();
%}

enc_class enc_brnop1() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_mcrf);
  MacroAssembler _masm(&cbuf);
  __ brnop1();
%}

enc_class enc_brnop2() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_mcrf);
  MacroAssembler _masm(&cbuf);
  __ brnop2();
%}

// Emitter: patchable runtime call to the rethrow stub.
enc_class enc_rethrow() %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);
  cbuf.set_insts_mark();
  __ b64_patchable((address)OptoRuntime::rethrow_stub(),
                   relocInfo::runtime_call_type);
%}

// Memory barriers: acquire/release map to lwsync, volatile to a full sync.
enc_class enc_membar_acquire %{
  // TODO: PPC port $archOpcode(ppc64Opcode_lwsync);
  MacroAssembler _masm(&cbuf);
  __ acquire();
%}

enc_class enc_membar_release %{
  // TODO: PPC port $archOpcode(ppc64Opcode_lwsync);
  MacroAssembler _masm(&cbuf);
  __ release();
%}

enc_class enc_membar_volatile %{
  // TODO: PPC port $archOpcode(ppc64Opcode_sync);
  MacroAssembler _masm(&cbuf);
  __ fence();
%}

// Emitter: safepoint poll — load from the polling page, relocated so the
// signal handler can recognize the faulting instruction.
enc_class enc_poll(immI dst, iRegLdst poll) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_ld);
  // Fake operand dst needed for PPC scheduler.
  assert($dst$$constant == 0x0, "dst must be 0x0");

  MacroAssembler _masm(&cbuf);
  // Mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_type.
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page($poll$$Register);
%}

// Emitter: materialize the polling-page address into a register.
enc_class enc_loadConPollAddr(iRegPdst poll) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_compound);
  MacroAssembler _masm(&cbuf);

  Register Rpoll = reg_to_register_object($poll$$reg);
  __ load_const_optimized(Rpoll, (long)(address) os::get_polling_page()); // TODO: PPC port get_standard_polling_page());
%}

// Emitter: 64-bit bitfield insert (rldimi). Note the operand order swap:
// the ADL operands are (pos, shift) but insrdi takes (nbits, b).
enc_class enc_insrdi(iRegLdst dst, immI16 pos, iRegLsrc src, immI16 shift) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rldimi);
  MacroAssembler _masm(&cbuf);
  Register src_reg = reg_to_register_object($src$$reg);
  Register dst_reg = reg_to_register_object($dst$$reg);

  __ insrdi(dst_reg, src_reg, $shift$$constant, $pos$$constant);
%}

// Emitter: 32-bit bitfield insert (rlwimi); same operand-order note as above.
enc_class enc_insrwi(iRegLdst dst, immI16 pos, iRegLsrc src, immI16 shift) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_rlwimi);
  MacroAssembler _masm(&cbuf);
  Register src_reg = reg_to_register_object($src$$reg);
  Register dst_reg = reg_to_register_object($dst$$reg);

  __ insrwi(dst_reg, src_reg, $shift$$constant, $pos$$constant);
%}

// Emitter: population count, word and doubleword forms.
enc_class enc_popcntw(iRegLdst dst, iRegLsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_popcntb);
  MacroAssembler _masm(&cbuf);
  Register src_reg = reg_to_register_object($src$$reg);
  Register dst_reg = reg_to_register_object($dst$$reg);
  __ popcntw(dst_reg, src_reg);
%}

enc_class enc_popcntd(iRegLdst dst, iRegLsrc src) %{
  // TODO: PPC port $archOpcode(ppc64Opcode_popcntb);
  MacroAssembler _masm(&cbuf);
  Register src_reg = reg_to_register_object($src$$reg);
  Register dst_reg = reg_to_register_object($dst$$reg);
  __ popcntd(dst_reg, src_reg);
%}

%}

//----------FRAME--------------------------------------------------------------
// Definition of frame structure and management information.

frame %{
  // What direction does stack grow in (assumed to be same for native & Java).
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention between
  // compiled code and the interpreter.

  // Inline Cache Register or methodOop for I2C.
  inline_cache_reg(R19); // R19_method

  // Method Oop Register when calling interpreter.
  interpreter_method_oop_reg(R19); // R19_method

  // Optional: name the operand used by cisc-spilling to access
  // [stack_pointer + offset].
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by a Monitor enter.
  sync_stack_slots((frame::jit_monitor_size / VMRegImpl::stack_slot_size));

  // Compiled code's Frame Pointer.
  frame_pointer(R1); // R1_SP

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors. I2CAdaptors convert from
  // interpreted java to compiled java.
  //
  // R14_state holds pointer to caller's cInterpreter.
  interpreter_frame_pointer(R14); // R14_state

  stack_alignment(frame::alignment_in_bytes);

  in_preserve_stack_slots((frame::jit_in_preserve_size / VMRegImpl::stack_slot_size));

  // Number of outgoing stack slots killed above the
  // out_preserve_stack_slots for calls to C. Supports the var-args
  // backing area for register parms.
  //
  varargs_C_out_slots_killed(((frame::abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  //
  // A: Link register is stored in stack slot ...
  // M: ... but it's in the caller's frame according to PPC-64 ABI.
  // J: Therefore, we make sure that the link register is also in R11_scratch1
  //    at the end of the prolog.
  // B: We use R20, now.
  //return_addr(REG R20);

  // G: After reading the comments made by all the luminaries on their
  //    failure to tell the compiler where the return address really is,
  //    I hardly dare to try myself. However, I'm convinced it's in slot
  //    4 what apparently works and saves us some spills.
  return_addr(STACK 4);

  // This is the body of the function
  //
  // void Matcher::calling_convention(OptoRegPair* sig, // array of ideal regs
  //                                  uint length,      // length of array
  //                                  bool is_outgoing)
  //
  // The `sig' array is to be updated. sig[j] represents the location
  // of the j-th argument, either a register or a stack slot.

  // Comment taken from i486.ad:
  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.
  calling_convention %{
    // No difference between ingoing/outgoing. Just pass false.
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Comment taken from i486.ad:
  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.
  c_calling_convention %{
    // This is obviously always outgoing.
    // C argument in register AND stack slot.
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of native (C/C++) and interpreter return values. This
  // is specified to be the same as Java. In the 32-bit VM, long
  // values are actually returned from native calls in O0:O1 and
  // returned to the interpreter in I0:I1. The copying to and from
  // the register pairs is done by the appropriate call and epilog
  // opcodes. This simplifies the register allocator.
  c_return_value %{
    assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
           (ideal_reg == Op_RegN && Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0),
           "only return normal values");
    // enum names from opcodes.hpp: Op_Node Op_Set Op_RegN Op_RegI Op_RegP Op_RegF Op_RegD Op_RegL
    static int typeToRegLo[Op_RegL+1] = { 0, 0, R3_num, R3_num, R3_num, F1_num, F1_num, R3_num };
    static int typeToRegHi[Op_RegL+1] = { 0, 0, OptoReg::Bad, R3_H_num, R3_H_num, OptoReg::Bad, F1_H_num, R3_H_num };
    return OptoRegPair(typeToRegHi[ideal_reg], typeToRegLo[ideal_reg]);
  %}

  // Location of compiled Java return values. Same as C
  return_value %{
    assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
           (ideal_reg == Op_RegN && Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0),
           "only return normal values");
    // enum names from opcodes.hpp: Op_Node Op_Set Op_RegN Op_RegI Op_RegP Op_RegF Op_RegD Op_RegL
    static int typeToRegLo[Op_RegL+1] = { 0, 0, R3_num, R3_num, R3_num, F1_num, F1_num, R3_num };
    static int typeToRegHi[Op_RegL+1] = { 0, 0, OptoReg::Bad, R3_H_num, R3_H_num, OptoReg::Bad, F1_H_num, R3_H_num };
    return OptoRegPair(typeToRegHi[ideal_reg], typeToRegLo[ideal_reg]);
  %}
%}


//----------ATTRIBUTES---------------------------------------------------------

//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1); // Required cost attribute.

//----------Instruction Attributes---------------------------------------------

// Cost attribute. required.
ins_attrib ins_cost(DEFAULT_COST);

// Is this instruction a non-matching short branch variant of some
// long branch? Not required.
ins_attrib ins_short_branch(0);

// This instruction does implicit checks at the given machine-instruction offset
// (optional attribute).
ins_attrib ins_implicit_check_offset(-1); // TODO: PPC port

ins_attrib ins_implicit_check_follows_matched_true_path(true);
ins_attrib ins_is_TrapBasedCheckNode(true);

// Number of constants.
// This instruction uses the given number of constants
// (optional attribute).
// This is needed to determine in time whether the constant pool will
// exceed 4000 entries. Before lateExpand the overall number of constants
// is determined. It's also used to compute the constant pool size
// in Output().
ins_attrib ins_num_consts(0);

// Required alignment attribute (must be a power of 2) specifies the
// alignment that some part of the instruction (not necessarily the
// start) requires. If > 1, a compute_padding() function must be
// provided for the instruction.
ins_attrib ins_alignment(1);

// Enforce/prohibit rematerializations.
// - If an instruction is attributed with 'ins_cannot_rematerialize(true)'
//   then rematerialization of that instruction is prohibited and the
//   instruction's value will be spilled if necessary.
//   Causes that MachNode::rematerialize() returns false.
// - If an instruction is attributed with 'ins_should_rematerialize(true)'
//   then rematerialization should be enforced and a copy of the instruction
//   should be inserted if possible; rematerialization is not guaranteed.
//   Note: this may result in rematerializations in front of every use.
//   Causes that MachNode::rematerialize() can return true.
// (optional attribute)
ins_attrib ins_cannot_rematerialize(false);
ins_attrib ins_should_rematerialize(false);

// Instruction has variable size depending on alignment.
ins_attrib ins_variable_size_depending_on_alignment(false);

// Instruction requires the toc register.
ins_attrib ins_requires_toc(false);

// Instruction is a nop.
ins_attrib ins_is_nop(false);

// Instruction is mapped to a MachIfFastLock node (instead of MachFastLock).
ins_attrib ins_use_mach_if_fast_lock_node(false);

// Field for the toc offset of a constant.
//
// This is needed if the toc offset is not encodable as an immediate in
// the PPC load instruction. If so, the upper (hi) bits of the offset are
// added to the toc, and from this a load with immediate is performed.
// With late expand, we get two nodes that require the same offset
// but which don't know about each other. The offset is only known
// when the constant is added to the constant pool during emitting.
// It is generated in the 'hi'-node adding the upper bits, and saved
// in this node. The 'lo'-node has a link to the 'hi'-node and reads
// the offset from there when it gets encoded.
ins_attrib ins_field_const_toc_offset(0);
ins_attrib ins_field_const_toc_offset_hi_node(0);

// A field that can hold the instructions offset in the code buffer.
// Set in the nodes emitter.
ins_attrib ins_field_cbuf_insts_offset(-1);

// Fields for referencing a call's load-IC-node.
// If the toc offset can not be encoded as an immediate in a load, we
// use two nodes.
ins_attrib ins_field_load_ic_hi_node(0);
ins_attrib ins_field_load_ic_node(0);

//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct
// parsing in the ADLC because operands constitute user defined types
// which are used in instruction definitions.
//
// Formats are generated automatically for constants and base registers.
6288 6289 //----------Simple Operands---------------------------------------------------- 6290 // Immediate Operands 6291 6292 // Integer Immediate: 32-bit 6293 operand immI() %{ 6294 match(ConI); 6295 op_cost(40); 6296 format %{ %} 6297 interface(CONST_INTER); 6298 %} 6299 6300 operand immI8() %{ 6301 predicate(Assembler::is_simm(n->get_int(), 8)); 6302 op_cost(0); 6303 match(ConI); 6304 format %{ %} 6305 interface(CONST_INTER); 6306 %} 6307 6308 // Integer Immediate: 16-bit 6309 operand immI16() %{ 6310 predicate(Assembler::is_simm(n->get_int(), 16)); 6311 op_cost(0); 6312 match(ConI); 6313 format %{ %} 6314 interface(CONST_INTER); 6315 %} 6316 6317 // Integer Immediate: 32-bit, where lowest 16 bits are 0x0000. 6318 operand immIhi16() %{ 6319 predicate(((n->get_int() & 0xffff0000) != 0) && ((n->get_int() & 0xffff) == 0)); 6320 match(ConI); 6321 op_cost(0); 6322 format %{ %} 6323 interface(CONST_INTER); 6324 %} 6325 6326 operand immInegpow2() %{ 6327 predicate(is_power_of_2_long((jlong) (julong) (juint) (-(n->get_int())))); 6328 match(ConI); 6329 op_cost(0); 6330 format %{ %} 6331 interface(CONST_INTER); 6332 %} 6333 6334 operand immIpow2minus1() %{ 6335 predicate(is_power_of_2_long((((jlong) (n->get_int()))+1))); 6336 match(ConI); 6337 op_cost(0); 6338 format %{ %} 6339 interface(CONST_INTER); 6340 %} 6341 6342 operand immIpowerOf2() %{ 6343 predicate(is_power_of_2_long((((jlong) (julong) (juint) (n->get_int()))))); 6344 match(ConI); 6345 op_cost(0); 6346 format %{ %} 6347 interface(CONST_INTER); 6348 %} 6349 6350 // Unsigned Integer Immediate: the values 0-31 6351 operand uimmI5() %{ 6352 predicate(Assembler::is_uimm(n->get_int(), 5)); 6353 match(ConI); 6354 op_cost(0); 6355 format %{ %} 6356 interface(CONST_INTER); 6357 %} 6358 6359 // Unsigned Integer Immediate: 6-bit 6360 operand uimmI6() %{ 6361 predicate(Assembler::is_uimm(n->get_int(), 6)); 6362 match(ConI); 6363 op_cost(0); 6364 format %{ %} 6365 interface(CONST_INTER); 6366 %} 6367 6368 // Unsigned 
Integer Immediate: 6-bit int, greater than 32 6369 operand uimmI6_ge32() %{ 6370 predicate(Assembler::is_uimm(n->get_int(), 6) && n->get_int() >= 32); 6371 match(ConI); 6372 op_cost(0); 6373 format %{ %} 6374 interface(CONST_INTER); 6375 %} 6376 6377 // Unsigned Integer Immediate: 15-bit 6378 operand uimmI15() %{ 6379 predicate(Assembler::is_uimm(n->get_int(), 15)); 6380 match(ConI); 6381 op_cost(0); 6382 format %{ %} 6383 interface(CONST_INTER); 6384 %} 6385 6386 // Unsigned Integer Immediate: 16-bit 6387 operand uimmI16() %{ 6388 predicate(Assembler::is_uimm(n->get_int(), 16)); 6389 match(ConI); 6390 op_cost(0); 6391 format %{ %} 6392 interface(CONST_INTER); 6393 %} 6394 6395 // constant 'int 0'. 6396 operand immI_0() %{ 6397 predicate(n->get_int() == 0); 6398 match(ConI); 6399 op_cost(0); 6400 format %{ %} 6401 interface(CONST_INTER); 6402 %} 6403 6404 // constant 'int 1'. 6405 operand immI_1() %{ 6406 predicate(n->get_int() == 1); 6407 match(ConI); 6408 op_cost(0); 6409 format %{ %} 6410 interface(CONST_INTER); 6411 %} 6412 6413 // constant 'int -1'. 6414 operand immI_minus1() %{ 6415 predicate(n->get_int() == -1); 6416 match(ConI); 6417 op_cost(0); 6418 format %{ %} 6419 interface(CONST_INTER); 6420 %} 6421 6422 // int value 16. 6423 operand immI_16() %{ 6424 predicate(n->get_int() == 16); 6425 match(ConI); 6426 op_cost(0); 6427 format %{ %} 6428 interface(CONST_INTER); 6429 %} 6430 6431 // int value 24. 
// int value 24.
operand immI_24() %{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Compressed oops constants
// Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immN_0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

//// Compressed klass constants
//operand immNKlass() %{
//  match(ConNKlass);
//
//  op_cost(0);
//  format %{ %}
//  interface(CONST_INTER);
//%}

// Pointer Immediate: 64-bit
operand immP() %{
  match(ConP);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand to avoid match of loadConP.
// This operand can be used to avoid matching of an instruct
// with chain rule.
operand immP_NM() %{
  match(ConP);
  predicate(false);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'pointer 0'.
operand immP_0() %{
  predicate(n->get_ptr() == 0);
  match(ConP);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// pointer 0x0 or 0x1
operand immP_0or1() %{
  predicate((n->get_ptr() == 0) || (n->get_ptr() == 1));
  match(ConP);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immP_poll() %{
  // Use standard_polling_page.
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page()); // TODO: PPC port get_standard_polling_page());
  match(ConP);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL() %{
  match(ConL);
  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 16-bit
operand immL16() %{
  predicate(Assembler::is_simm(n->get_long(), 16));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 16-bit, 4-aligned
operand immL16Alg4() %{
  predicate(Assembler::is_simm(n->get_long(), 16) && ((n->get_long() & 0x3) == 0));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 32-bit, where lowest 16 bits are 0x0000.
operand immL32hi16() %{
  predicate(Assembler::is_simm(n->get_long(), 32) && ((n->get_long() & 0xffffL) == 0L));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 32-bit
operand immL32() %{
  predicate(Assembler::is_simm(n->get_long(), 32));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 64-bit, where highest 16 bits are not 0x0000.
operand immLhighest16() %{
  predicate((n->get_long() & 0xffff000000000000L) != 0L &&
            (n->get_long() & 0x0000ffffffffffffL) == 0L);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLnegpow2() %{
  predicate(is_power_of_2_long((jlong)-(n->get_long())));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLpow2minus1() %{
  predicate(is_power_of_2_long((((jlong) (n->get_long()))+1)) &&
            (n->get_long() != (jlong)0xffffffffffffffffL));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'long 0'.
operand immL_0() %{
  predicate(n->get_long() == 0L);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'long -1'.
operand immL_minus1() %{
  predicate(n->get_long() == -1L);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned Long Immediate: 16-bit
operand uimmL16() %{
  predicate(Assembler::is_uimm(n->get_long(), 16));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF() %{
  match(ConF);
  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'float +0.0'.
6629 operand immF_0() %{ 6630 predicate((n->getf() == 0) && 6631 (fpclassify(n->getf()) == FP_ZERO) && (signbit(n->getf()) == 0)); 6632 match(ConF); 6633 op_cost(0); 6634 format %{ %} 6635 interface(CONST_INTER); 6636 %} 6637 6638 // Double Immediate 6639 operand immD() %{ 6640 match(ConD); 6641 op_cost(40); 6642 format %{ %} 6643 interface(CONST_INTER); 6644 %} 6645 6646 // Integer Register Operands 6647 // Integer Destination Register 6648 // See definition of reg_class bits32_reg_rw. 6649 operand iRegIdst() %{ 6650 constraint(ALLOC_IN_RC(bits32_reg_rw)); 6651 match(RegI); 6652 match(rscratch1RegI); 6653 match(rscratch2RegI); 6654 match(rarg1RegI); 6655 match(rarg2RegI); 6656 match(rarg3RegI); 6657 match(rarg4RegI); 6658 format %{ %} 6659 interface(REG_INTER); 6660 %} 6661 6662 // Integer Source Register 6663 // See definition of reg_class bits32_reg_ro. 6664 operand iRegIsrc() %{ 6665 constraint(ALLOC_IN_RC(bits32_reg_ro)); 6666 match(RegI); 6667 match(rscratch1RegI); 6668 match(rscratch2RegI); 6669 match(rarg1RegI); 6670 match(rarg2RegI); 6671 match(rarg3RegI); 6672 match(rarg4RegI); 6673 format %{ %} 6674 interface(REG_INTER); 6675 %} 6676 6677 operand rscratch1RegI() %{ 6678 constraint(ALLOC_IN_RC(rscratch1_bits32_reg)); 6679 match(iRegIdst); 6680 format %{ %} 6681 interface(REG_INTER); 6682 %} 6683 6684 operand rscratch2RegI() %{ 6685 constraint(ALLOC_IN_RC(rscratch2_bits32_reg)); 6686 match(iRegIdst); 6687 format %{ %} 6688 interface(REG_INTER); 6689 %} 6690 6691 operand rarg1RegI() %{ 6692 constraint(ALLOC_IN_RC(rarg1_bits32_reg)); 6693 match(iRegIdst); 6694 format %{ %} 6695 interface(REG_INTER); 6696 %} 6697 6698 operand rarg2RegI() %{ 6699 constraint(ALLOC_IN_RC(rarg2_bits32_reg)); 6700 match(iRegIdst); 6701 format %{ %} 6702 interface(REG_INTER); 6703 %} 6704 6705 operand rarg3RegI() %{ 6706 constraint(ALLOC_IN_RC(rarg3_bits32_reg)); 6707 match(iRegIdst); 6708 format %{ %} 6709 interface(REG_INTER); 6710 %} 6711 6712 operand rarg4RegI() %{ 6713 
constraint(ALLOC_IN_RC(rarg4_bits32_reg)); 6714 match(iRegIdst); 6715 format %{ %} 6716 interface(REG_INTER); 6717 %} 6718 6719 operand rarg1RegL() %{ 6720 constraint(ALLOC_IN_RC(rarg1_bits64_reg)); 6721 match(iRegLdst); 6722 format %{ %} 6723 interface(REG_INTER); 6724 %} 6725 6726 operand rarg2RegL() %{ 6727 constraint(ALLOC_IN_RC(rarg2_bits64_reg)); 6728 match(iRegLdst); 6729 format %{ %} 6730 interface(REG_INTER); 6731 %} 6732 6733 operand rarg3RegL() %{ 6734 constraint(ALLOC_IN_RC(rarg3_bits64_reg)); 6735 match(iRegLdst); 6736 format %{ %} 6737 interface(REG_INTER); 6738 %} 6739 6740 operand rarg4RegL() %{ 6741 constraint(ALLOC_IN_RC(rarg4_bits64_reg)); 6742 match(iRegLdst); 6743 format %{ %} 6744 interface(REG_INTER); 6745 %} 6746 6747 // Pointer Destination Register 6748 // See definition of reg_class bits64_reg_rw. 6749 operand iRegPdst() %{ 6750 constraint(ALLOC_IN_RC(bits64_reg_rw)); 6751 match(RegP); 6752 match(rscratch1RegP); 6753 match(rscratch2RegP); 6754 match(rarg1RegP); 6755 match(rarg2RegP); 6756 match(rarg3RegP); 6757 match(rarg4RegP); 6758 format %{ %} 6759 interface(REG_INTER); 6760 %} 6761 6762 // Pointer Destination Register 6763 // Operand not using r11 and r12 (killed in epilog). 6764 operand iRegPdstNoScratch() %{ 6765 constraint(ALLOC_IN_RC(bits64_reg_leaf_call)); 6766 match(RegP); 6767 match(rarg1RegP); 6768 match(rarg2RegP); 6769 match(rarg3RegP); 6770 match(rarg4RegP); 6771 format %{ %} 6772 interface(REG_INTER); 6773 %} 6774 6775 // Pointer Source Register 6776 // See definition of reg_class bits64_reg_ro. 6777 operand iRegPsrc() %{ 6778 constraint(ALLOC_IN_RC(bits64_reg_ro)); 6779 match(RegP); 6780 match(iRegPdst); 6781 match(rscratch1RegP); 6782 match(rscratch2RegP); 6783 match(rarg1RegP); 6784 match(rarg2RegP); 6785 match(rarg3RegP); 6786 match(rarg4RegP); 6787 match(threadRegP); 6788 format %{ %} 6789 interface(REG_INTER); 6790 %} 6791 6792 // Thread operand. 
6793 operand threadRegP() %{ 6794 constraint(ALLOC_IN_RC(thread_bits64_reg)); 6795 match(iRegPdst); 6796 format %{ "R16" %} 6797 interface(REG_INTER); 6798 %} 6799 6800 operand rscratch1RegP() %{ 6801 constraint(ALLOC_IN_RC(rscratch1_bits64_reg)); 6802 match(iRegPdst); 6803 format %{ "R11" %} 6804 interface(REG_INTER); 6805 %} 6806 6807 operand rscratch2RegP() %{ 6808 constraint(ALLOC_IN_RC(rscratch2_bits64_reg)); 6809 match(iRegPdst); 6810 format %{ %} 6811 interface(REG_INTER); 6812 %} 6813 6814 operand rarg1RegP() %{ 6815 constraint(ALLOC_IN_RC(rarg1_bits64_reg)); 6816 match(iRegPdst); 6817 format %{ %} 6818 interface(REG_INTER); 6819 %} 6820 6821 operand rarg2RegP() %{ 6822 constraint(ALLOC_IN_RC(rarg2_bits64_reg)); 6823 match(iRegPdst); 6824 format %{ %} 6825 interface(REG_INTER); 6826 %} 6827 6828 operand rarg3RegP() %{ 6829 constraint(ALLOC_IN_RC(rarg3_bits64_reg)); 6830 match(iRegPdst); 6831 format %{ %} 6832 interface(REG_INTER); 6833 %} 6834 6835 operand rarg4RegP() %{ 6836 constraint(ALLOC_IN_RC(rarg4_bits64_reg)); 6837 match(iRegPdst); 6838 format %{ %} 6839 interface(REG_INTER); 6840 %} 6841 6842 operand iRegNsrc() %{ 6843 constraint(ALLOC_IN_RC(bits32_reg_ro)); 6844 match(RegN); 6845 match(iRegNdst); 6846 6847 format %{ %} 6848 interface(REG_INTER); 6849 %} 6850 6851 operand iRegNdst() %{ 6852 constraint(ALLOC_IN_RC(bits32_reg_rw)); 6853 match(RegN); 6854 6855 format %{ %} 6856 interface(REG_INTER); 6857 %} 6858 6859 // Long Destination Register 6860 // See definition of reg_class bits64_reg_rw. 6861 operand iRegLdst() %{ 6862 constraint(ALLOC_IN_RC(bits64_reg_rw)); 6863 match(RegL); 6864 match(rscratch1RegL); 6865 match(rscratch2RegL); 6866 format %{ %} 6867 interface(REG_INTER); 6868 %} 6869 6870 // Long Source Register 6871 // See definition of reg_class bits64_reg_ro. 
6872 operand iRegLsrc() %{ 6873 constraint(ALLOC_IN_RC(bits64_reg_ro)); 6874 match(RegL); 6875 match(iRegLdst); 6876 match(rscratch1RegL); 6877 match(rscratch2RegL); 6878 format %{ %} 6879 interface(REG_INTER); 6880 %} 6881 6882 // Special operand for ConvL2I. 6883 operand iRegL2Isrc(iRegLsrc reg) %{ 6884 constraint(ALLOC_IN_RC(bits64_reg_ro)); 6885 match(ConvL2I reg); 6886 format %{ "ConvL2I($reg)" %} 6887 interface(REG_INTER) 6888 %} 6889 6890 operand rscratch1RegL() %{ 6891 constraint(ALLOC_IN_RC(rscratch1_bits64_reg)); 6892 match(RegL); 6893 format %{ %} 6894 interface(REG_INTER); 6895 %} 6896 6897 operand rscratch2RegL() %{ 6898 constraint(ALLOC_IN_RC(rscratch2_bits64_reg)); 6899 match(RegL); 6900 format %{ %} 6901 interface(REG_INTER); 6902 %} 6903 6904 // Condition Code Flag Registers 6905 operand flagsReg() %{ 6906 constraint(ALLOC_IN_RC(int_flags)); 6907 match(RegFlags); 6908 format %{ %} 6909 interface(REG_INTER); 6910 %} 6911 6912 // Condition Code Flag Register CR0 6913 operand flagsRegCR0() %{ 6914 constraint(ALLOC_IN_RC(int_flags_CR0)); 6915 match(RegFlags); 6916 format %{ "CR0" %} 6917 interface(REG_INTER); 6918 %} 6919 6920 operand flagsRegCR1() %{ 6921 constraint(ALLOC_IN_RC(int_flags_CR1)); 6922 match(RegFlags); 6923 format %{ "CR1" %} 6924 interface(REG_INTER); 6925 %} 6926 6927 operand flagsRegCR6() %{ 6928 constraint(ALLOC_IN_RC(int_flags_CR6)); 6929 match(RegFlags); 6930 format %{ "CR6" %} 6931 interface(REG_INTER); 6932 %} 6933 6934 operand regCTR() %{ 6935 constraint(ALLOC_IN_RC(ctr_reg)); 6936 // RegFlags should work. Introducing a RegSpecial type would cause a 6937 // lot of changes. 
6938 match(RegFlags); 6939 format %{"SR_CTR" %} 6940 interface(REG_INTER); 6941 %} 6942 6943 operand regD() %{ 6944 constraint(ALLOC_IN_RC(dbl_reg)); 6945 match(RegD); 6946 format %{ %} 6947 interface(REG_INTER); 6948 %} 6949 6950 operand regF() %{ 6951 constraint(ALLOC_IN_RC(flt_reg)); 6952 match(RegF); 6953 format %{ %} 6954 interface(REG_INTER); 6955 %} 6956 6957 // Special Registers 6958 6959 // Method Register 6960 operand inline_cache_regP(iRegPdst reg) %{ 6961 constraint(ALLOC_IN_RC(r19_bits64_reg)); // inline_cache_reg 6962 match(reg); 6963 format %{ %} 6964 interface(REG_INTER); 6965 %} 6966 6967 operand compiler_method_oop_regP(iRegPdst reg) %{ 6968 constraint(ALLOC_IN_RC(rscratch1_bits64_reg)); // compiler_method_oop_reg 6969 match(reg); 6970 format %{ %} 6971 interface(REG_INTER); 6972 %} 6973 6974 operand interpreter_method_oop_regP(iRegPdst reg) %{ 6975 constraint(ALLOC_IN_RC(r19_bits64_reg)); // interpreter_method_oop_reg 6976 match(reg); 6977 format %{ %} 6978 interface(REG_INTER); 6979 %} 6980 6981 // Operands to remove register moves in unscaled mode. 6982 // Match read/write registers with an EncodeP node if neither shift nor add are required. 
6983 operand iRegP2N(iRegPsrc reg) %{ 6984 predicate(false /* TODO: PPC port MatchDecodeNodes*/&& Universe::narrow_oop_shift() == 0); 6985 constraint(ALLOC_IN_RC(bits64_reg_ro)); 6986 match(EncodeP reg); 6987 format %{ "$reg" %} 6988 interface(REG_INTER) 6989 %} 6990 6991 operand iRegN2P(iRegNsrc reg) %{ 6992 predicate(false /* TODO: PPC port MatchDecodeNodes*/); 6993 constraint(ALLOC_IN_RC(bits32_reg_ro)); 6994 match(DecodeN reg); 6995 format %{ "$reg" %} 6996 interface(REG_INTER) 6997 %} 6998 6999 //----------Complex Operands--------------------------------------------------- 7000 // Indirect Memory Reference 7001 operand indirect(iRegPsrc reg) %{ 7002 constraint(ALLOC_IN_RC(bits64_reg_ro)); 7003 match(reg); 7004 op_cost(100); 7005 format %{ "[$reg]" %} 7006 interface(MEMORY_INTER) %{ 7007 base($reg); 7008 index(0x0); 7009 scale(0x0); 7010 disp(0x0); 7011 %} 7012 %} 7013 7014 // Indirect with Offset 7015 operand indOffset16(iRegPsrc reg, immL16 offset) %{ 7016 constraint(ALLOC_IN_RC(bits64_reg_ro)); 7017 match(AddP reg offset); 7018 op_cost(100); 7019 format %{ "[$reg + $offset]" %} 7020 interface(MEMORY_INTER) %{ 7021 base($reg); 7022 index(0x0); 7023 scale(0x0); 7024 disp($offset); 7025 %} 7026 %} 7027 7028 // Indirect with 4-aligned Offset 7029 operand indOffset16Alg4(iRegPsrc reg, immL16Alg4 offset) %{ 7030 constraint(ALLOC_IN_RC(bits64_reg_ro)); 7031 match(AddP reg offset); 7032 op_cost(100); 7033 format %{ "[$reg + $offset]" %} 7034 interface(MEMORY_INTER) %{ 7035 base($reg); 7036 index(0x0); 7037 scale(0x0); 7038 disp($offset); 7039 %} 7040 %} 7041 7042 //----------Complex Operands for Compressed OOPs------------------------------- 7043 // Compressed OOPs with narrow_oop_shift == 0. 
7044 7045 // Indirect Memory Reference, compressed OOP 7046 operand indirectNarrow(iRegNsrc reg) %{ 7047 predicate(false /* TODO: PPC port MatchDecodeNodes*/); 7048 constraint(ALLOC_IN_RC(bits64_reg_ro)); 7049 match(DecodeN reg); 7050 op_cost(100); 7051 format %{ "[$reg]" %} 7052 interface(MEMORY_INTER) %{ 7053 base($reg); 7054 index(0x0); 7055 scale(0x0); 7056 disp(0x0); 7057 %} 7058 %} 7059 7060 // Indirect with Offset, compressed OOP 7061 operand indOffset16Narrow(iRegNsrc reg, immL16 offset) %{ 7062 predicate(false /* TODO: PPC port MatchDecodeNodes*/); 7063 constraint(ALLOC_IN_RC(bits64_reg_ro)); 7064 match(AddP (DecodeN reg) offset); 7065 op_cost(100); 7066 format %{ "[$reg + $offset]" %} 7067 interface(MEMORY_INTER) %{ 7068 base($reg); 7069 index(0x0); 7070 scale(0x0); 7071 disp($offset); 7072 %} 7073 %} 7074 7075 // Indirect with 4-aligned Offset, compressed OOP 7076 operand indOffset16NarrowAlg4(iRegNsrc reg, immL16Alg4 offset) %{ 7077 predicate(false /* TODO: PPC port MatchDecodeNodes*/); 7078 constraint(ALLOC_IN_RC(bits64_reg_ro)); 7079 match(AddP (DecodeN reg) offset); 7080 op_cost(100); 7081 format %{ "[$reg + $offset]" %} 7082 interface(MEMORY_INTER) %{ 7083 base($reg); 7084 index(0x0); 7085 scale(0x0); 7086 disp($offset); 7087 %} 7088 %} 7089 7090 //----------Special Memory Operands-------------------------------------------- 7091 // Stack Slot Operand 7092 // 7093 // This operand is used for loading and storing temporary values on 7094 // the stack where a match requires a value to flow through memory. 
7095 operand stackSlotI(sRegI reg) %{ 7096 constraint(ALLOC_IN_RC(stack_slots)); 7097 op_cost(100); 7098 //match(RegI); 7099 format %{ "[sp+$reg]" %} 7100 interface(MEMORY_INTER) %{ 7101 base(0x1); // R1_SP 7102 index(0x0); 7103 scale(0x0); 7104 disp($reg); // Stack Offset 7105 %} 7106 %} 7107 7108 operand stackSlotL(sRegL reg) %{ 7109 constraint(ALLOC_IN_RC(stack_slots)); 7110 op_cost(100); 7111 //match(RegL); 7112 format %{ "[sp+$reg]" %} 7113 interface(MEMORY_INTER) %{ 7114 base(0x1); // R1_SP 7115 index(0x0); 7116 scale(0x0); 7117 disp($reg); // Stack Offset 7118 %} 7119 %} 7120 7121 operand stackSlotP(sRegP reg) %{ 7122 constraint(ALLOC_IN_RC(stack_slots)); 7123 op_cost(100); 7124 //match(RegP); 7125 format %{ "[sp+$reg]" %} 7126 interface(MEMORY_INTER) %{ 7127 base(0x1); // R1_SP 7128 index(0x0); 7129 scale(0x0); 7130 disp($reg); // Stack Offset 7131 %} 7132 %} 7133 7134 operand stackSlotF(sRegF reg) %{ 7135 constraint(ALLOC_IN_RC(stack_slots)); 7136 op_cost(100); 7137 //match(RegF); 7138 format %{ "[sp+$reg]" %} 7139 interface(MEMORY_INTER) %{ 7140 base(0x1); // R1_SP 7141 index(0x0); 7142 scale(0x0); 7143 disp($reg); // Stack Offset 7144 %} 7145 %} 7146 7147 operand stackSlotD(sRegD reg) %{ 7148 constraint(ALLOC_IN_RC(stack_slots)); 7149 op_cost(100); 7150 //match(RegD); 7151 format %{ "[sp+$reg]" %} 7152 interface(MEMORY_INTER) %{ 7153 base(0x1); // R1_SP 7154 index(0x0); 7155 scale(0x0); 7156 disp($reg); // Stack Offset 7157 %} 7158 %} 7159 7160 // Operands for expressing Control Flow 7161 // NOTE: Label is a predefined operand which should not be redefined in 7162 // the AD file. It is generically handled within the ADLC. 7163 7164 //----------Conditional Branch Operands---------------------------------------- 7165 // Comparison Op 7166 // 7167 // This is the operation of the comparison, and is limited to the 7168 // following set of codes: L (<), LE (<=), G (>), GE (>=), E (==), NE 7169 // (!=). 
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below.

// When used for floating point comparisons: unordered same as less.
operand cmpOp() %{
  match(Bool);
  format %{ "" %}
  interface(COND_INTER) %{
    // BO only encodes bit 4 of bcondCRbiIsX, as bits 1-3 are always '100'.
    //                             BO          &  BI
    equal(0xA);         // 10 10:  bcondCRbiIs1 & Condition::equal
    not_equal(0x2);     // 00 10:  bcondCRbiIs0 & Condition::equal
    less(0x8);          // 10 00:  bcondCRbiIs1 & Condition::less
    greater_equal(0x0); // 00 00:  bcondCRbiIs0 & Condition::less
    less_equal(0x1);    // 00 01:  bcondCRbiIs0 & Condition::greater
    greater(0x9);       // 10 01:  bcondCRbiIs1 & Condition::greater
    //overflow(0xB);    // 10 11:  bcondCRbiIs1 & Condition::summary_overflow
    //no_overflow(0x3); // 00 11:  bcondCRbiIs0 & Condition::summary_overflow
  %}
%}

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.
// Indirect is not included since its use is limited to Compare & Swap.
7205 7206 opclass memory(indirect, indOffset16 /*, indIndex, tlsReference*/, indirectNarrow, indOffset16Narrow); 7207 // Memory operand where offsets are 4-aligned. Required for ld, std. 7208 opclass memoryAlg4(indirect, indOffset16Alg4, indirectNarrow, indOffset16NarrowAlg4); 7209 opclass indirectMemory(indirect, indirectNarrow); 7210 7211 // Special opclass for I and ConvL2I. 7212 opclass iRegIsrc_iRegL2Isrc(iRegIsrc, iRegL2Isrc); 7213 7214 // Operand classes to match encode and decode. iRegN_P2N is only used 7215 // for storeN. I have never seen an encode node elsewhere. 7216 opclass iRegN_P2N(iRegNsrc, iRegP2N); 7217 opclass iRegP_N2P(iRegPsrc, iRegN2P); 7218 7219 //----------PIPELINE----------------------------------------------------------- 7220 7221 pipeline %{ 7222 7223 // See J.M.Tendler et al. "Power4 system microarchitecture", IBM 7224 // J. Res. & Dev., No. 1, Jan. 2002. 7225 7226 //----------ATTRIBUTES--------------------------------------------------------- 7227 attributes %{ 7228 7229 // Power4 instructions are of fixed length. 7230 fixed_size_instructions; 7231 7232 // TODO: if `bundle' means number of instructions fetched 7233 // per cycle, this is 8. If `bundle' means Power4 `group', that is 7234 // max instructions issued per cycle, this is 5. 7235 max_instructions_per_bundle = 8; 7236 7237 // A Power4 instruction is 4 bytes long. 7238 instruction_unit_size = 4; 7239 7240 // The Power4 processor fetches 64 bytes... 7241 instruction_fetch_unit_size = 64; 7242 7243 // ...in one line 7244 instruction_fetch_units = 1 7245 7246 // Unused, list one so that array generated by adlc is not empty. 7247 // Aix compiler chokes if _nop_count = 0. 
7248 nops(fxNop); 7249 %} 7250 7251 //----------RESOURCES---------------------------------------------------------- 7252 // Resources are the functional units available to the machine 7253 resources( 7254 PPC_BR, // branch unit 7255 PPC_CR, // condition unit 7256 PPC_FX1, // integer arithmetic unit 1 7257 PPC_FX2, // integer arithmetic unit 2 7258 PPC_LDST1, // load/store unit 1 7259 PPC_LDST2, // load/store unit 2 7260 PPC_FP1, // float arithmetic unit 1 7261 PPC_FP2, // float arithmetic unit 2 7262 PPC_LDST = PPC_LDST1 | PPC_LDST2, 7263 PPC_FX = PPC_FX1 | PPC_FX2, 7264 PPC_FP = PPC_FP1 | PPC_FP2 7265 ); 7266 7267 //----------PIPELINE DESCRIPTION----------------------------------------------- 7268 // Pipeline Description specifies the stages in the machine's pipeline 7269 pipe_desc( 7270 // Power4 longest pipeline path 7271 PPC_IF, // instruction fetch 7272 PPC_IC, 7273 //PPC_BP, // branch prediction 7274 PPC_D0, // decode 7275 PPC_D1, // decode 7276 PPC_D2, // decode 7277 PPC_D3, // decode 7278 PPC_Xfer1, 7279 PPC_GD, // group definition 7280 PPC_MP, // map 7281 PPC_ISS, // issue 7282 PPC_RF, // resource fetch 7283 PPC_EX1, // execute (all units) 7284 PPC_EX2, // execute (FP, LDST) 7285 PPC_EX3, // execute (FP, LDST) 7286 PPC_EX4, // execute (FP) 7287 PPC_EX5, // execute (FP) 7288 PPC_EX6, // execute (FP) 7289 PPC_WB, // write back 7290 PPC_Xfer2, 7291 PPC_CP 7292 ); 7293 7294 //----------PIPELINE CLASSES--------------------------------------------------- 7295 // Pipeline Classes describe the stages in which input and output are 7296 // referenced by the hardware pipeline. 7297 7298 // Simple pipeline classes. 7299 7300 // Default pipeline class. 7301 pipe_class pipe_class_default() %{ 7302 single_instruction; 7303 fixed_latency(2); 7304 %} 7305 7306 // Pipeline class for empty instructions. 7307 pipe_class pipe_class_empty() %{ 7308 single_instruction; 7309 fixed_latency(0); 7310 %} 7311 7312 // Pipeline class for compares. 
7313 pipe_class pipe_class_compare() %{ 7314 single_instruction; 7315 fixed_latency(16); 7316 %} 7317 7318 // Pipeline class for traps. 7319 pipe_class pipe_class_trap() %{ 7320 single_instruction; 7321 fixed_latency(100); 7322 %} 7323 7324 // Pipeline class for memory operations. 7325 pipe_class pipe_class_memory() %{ 7326 single_instruction; 7327 fixed_latency(16); 7328 %} 7329 7330 // Pipeline class for call. 7331 pipe_class pipe_class_call() %{ 7332 single_instruction; 7333 fixed_latency(100); 7334 %} 7335 7336 // Define the class for the Nop node. 7337 define %{ 7338 MachNop = pipe_class_default; 7339 %} 7340 7341 %} 7342 7343 //----------INSTRUCTIONS------------------------------------------------------- 7344 7345 // Naming of instructions: 7346 // opA_operB / opA_operB_operC: 7347 // Operation 'op' with one or two source operands 'oper'. Result 7348 // type is A, source operand types are B and C. 7349 // Iff A == B == C, B and C are left out. 7350 // 7351 // The instructions are ordered according to the following scheme: 7352 // - loads 7353 // - load constants 7354 // - prefetch 7355 // - store 7356 // - encode/decode 7357 // - membar 7358 // - conditional moves 7359 // - compare & swap 7360 // - arithmetic and logic operations 7361 // * int: Add, Sub, Mul, Div, Mod 7362 // * int: lShift, arShift, urShift, rot 7363 // * float: Add, Sub, Mul, Div 7364 // * and, or, xor ... 7365 // - register moves: float <-> int, reg <-> stack, repl 7366 // - cast (high level type cast, XtoP, castPP, castII, not_null etc. 7367 // - conv (low level type cast requiring bit changes (sign extend etc) 7368 // - compares, range & zero checks. 7369 // - branches 7370 // - complex operations, intrinsics, min, max, replicate 7371 // - lock 7372 // - Calls 7373 // 7374 // If there are similar instructions with different types they are sorted: 7375 // int before float 7376 // small before big 7377 // signed before unsigned 7378 // e.g., loadS before loadUS before loadI before loadF. 
7379 7380 7381 //----------Load/Store Instructions-------------------------------------------- 7382 7383 //----------Load Instructions-------------------------------------------------- 7384 7385 // Converts byte to int. 7386 // As convB2I_reg, but without match rule. The match rule of convB2I_reg 7387 // reuses the 'amount' operand, but adlc expects that operand specification 7388 // and operands in match rule are equivalent. 7389 instruct convB2I_reg_2(iRegIdst dst, iRegIsrc src) %{ 7390 effect(DEF dst, USE src); 7391 format %{ "EXTSB $dst, $src \t// byte->int" %} 7392 size(4); 7393 ins_encode( enc_extsb(dst, src) ); 7394 ins_pipe(pipe_class_default); 7395 %} 7396 7397 instruct loadUB_indirect(iRegIdst dst, indirectMemory mem) %{ 7398 // match-rule, false predicate 7399 match(Set dst (LoadB mem)); 7400 predicate(false); 7401 7402 format %{ "LBZ $dst, $mem" %} 7403 size(4); 7404 ins_encode( enc_lbz(dst, mem) ); 7405 ins_pipe(pipe_class_memory); 7406 %} 7407 7408 instruct loadUB_indirect_ac(iRegIdst dst, indirectMemory mem) %{ 7409 // match-rule, false predicate 7410 match(Set dst (LoadB mem)); 7411 predicate(false); 7412 7413 format %{ "LBZ $dst, $mem\n\t" 7414 "TWI $dst\n\t" 7415 "ISYNC" %} 7416 size(12); 7417 ins_encode( enc_lbz_ac(dst, mem) ); 7418 ins_pipe(pipe_class_memory); 7419 %} 7420 7421 // Load Byte (8bit signed). LoadB = LoadUB + ConvUB2B. 
// Expands to unsigned-byte load plus sign-extension (EXTSB).
instruct loadB_indirect_Ex(iRegIdst dst, indirectMemory mem) %{
  match(Set dst (LoadB mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
  expand %{
    iRegIdst tmp;
    loadUB_indirect(tmp, mem);
    convB2I_reg_2(dst, tmp);
  %}
%}

// Acquire variant of the above (matches ordered loads, no predicate).
instruct loadB_indirect_ac_Ex(iRegIdst dst, indirectMemory mem) %{
  match(Set dst (LoadB mem));
  ins_cost(3*MEMORY_REF_COST + DEFAULT_COST);
  expand %{
    iRegIdst tmp;
    loadUB_indirect_ac(tmp, mem);
    convB2I_reg_2(dst, tmp);
  %}
%}

// Unsigned byte load with 16-bit offset; only used via expand rules.
instruct loadUB_indOffset16(iRegIdst dst, indOffset16 mem) %{
  // match-rule, false predicate
  match(Set dst (LoadB mem));
  predicate(false);

  format %{ "LBZ $dst, $mem" %}
  size(4);
  ins_encode( enc_lbz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Acquire variant: LBZ followed by TWI/ISYNC.
instruct loadUB_indOffset16_ac(iRegIdst dst, indOffset16 mem) %{
  // match-rule, false predicate
  match(Set dst (LoadB mem));
  predicate(false);

  format %{ "LBZ $dst, $mem\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lbz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Byte (8bit signed). LoadB = LoadUB + ConvUB2B.
instruct loadB_indOffset16_Ex(iRegIdst dst, indOffset16 mem) %{
  match(Set dst (LoadB mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  expand %{
    iRegIdst tmp;
    loadUB_indOffset16(tmp, mem);
    convB2I_reg_2(dst, tmp);
  %}
%}

// Acquire variant of the above.
instruct loadB_indOffset16_ac_Ex(iRegIdst dst, indOffset16 mem) %{
  match(Set dst (LoadB mem));
  ins_cost(3*MEMORY_REF_COST + DEFAULT_COST);

  expand %{
    iRegIdst tmp;
    loadUB_indOffset16_ac(tmp, mem);
    convB2I_reg_2(dst, tmp);
  %}
%}

// Load Unsigned Byte (8bit UNsigned) into an int reg.
instruct loadUB(iRegIdst dst, memory mem) %{
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  match(Set dst (LoadUB mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LBZ $dst, $mem \t// byte, zero-extend to int" %}
  size(4);
  ins_encode( enc_lbz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Unsigned Byte (8bit UNsigned) acquire.
instruct loadUB_ac(iRegIdst dst, memory mem) %{
  match(Set dst (LoadUB mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LBZ $dst, $mem \t// byte, zero-extend to int, acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lbz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register.
instruct loadUB2L(iRegLdst dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  // _kids[0]->_leaf is the LoadUB node inside the matched subtree.
  predicate(_kids[0]->_leaf->as_Load()->is_unordered() || followed_by_acquire(_kids[0]->_leaf));
  ins_cost(MEMORY_REF_COST);

  format %{ "LBZ $dst, $mem \t// byte, zero-extend to long" %}
  size(4);
  ins_encode( enc_lbz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Acquire variant of loadUB2L.
instruct loadUB2L_ac(iRegLdst dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LBZ $dst, $mem \t// byte, zero-extend to long, acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lbz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Short (16bit signed)
instruct loadS(iRegIdst dst, memory mem) %{
  match(Set dst (LoadS mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LHA $dst, $mem" %}
  size(4);
  ins_encode( enc_lha(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Short (16bit signed) acquire.
instruct loadS_ac(iRegIdst dst, memory mem) %{
  match(Set dst (LoadS mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LHA $dst, $mem\t acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lha_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Char (16bit unsigned)
instruct loadUS(iRegIdst dst, memory mem) %{
  match(Set dst (LoadUS mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LHZ $dst, $mem" %}
  size(4);
  ins_encode( enc_lhz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Char (16bit unsigned) acquire.
instruct loadUS_ac(iRegIdst dst, memory mem) %{
  match(Set dst (LoadUS mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LHZ $dst, $mem \t// acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lhz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register.
instruct loadUS2L(iRegLdst dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  // _kids[0]->_leaf is the LoadUS node inside the matched subtree.
  predicate(_kids[0]->_leaf->as_Load()->is_unordered() || followed_by_acquire(_kids[0]->_leaf));
  ins_cost(MEMORY_REF_COST);

  format %{ "LHZ $dst, $mem \t// short, zero-extend to long" %}
  size(4);
  ins_encode( enc_lhz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register acquire.
instruct loadUS2L_ac(iRegLdst dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LHZ $dst, $mem \t// short, zero-extend to long, acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lhz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Integer.
instruct loadI(iRegIdst dst, memory mem) %{
  match(Set dst (LoadI mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Integer acquire.
instruct loadI_ac(iRegIdst dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// load acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lwz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Match loading integer and casting it to unsigned int in
// long register.
// LoadI + ConvI2L + AndL 0xffffffff.
instruct loadUI2L(iRegLdst dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  // _kids[0]->_kids[0]->_leaf is the LoadI node inside the matched subtree.
  predicate(_kids[0]->_kids[0]->_leaf->as_Load()->is_unordered());
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// zero-extend to long" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// SAPJVM GL 2013-10-29 Match ConvI2L(LoadI)
// Match loading integer and casting it to long.
instruct loadI2L(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(_kids[0]->_leaf->as_Load()->is_unordered());
  ins_cost(MEMORY_REF_COST);

  format %{ "LWA $dst, $mem \t// loadI2L" %}
  size(4);
  ins_encode( enc_lwa(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Match loading integer and casting it to long - acquire.
instruct loadI2L_ac(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
  ins_cost(3*MEMORY_REF_COST);

  // Fixed: the first format line was missing its "\n\t" separator, so the
  // TWI line was glued onto the LWA line in the printed disassembly.
  // (All sibling acquire variants — loadL_ac, loadN_ac, etc. — end the
  // first line with "\n\t".)
  format %{ "LWA $dst, $mem \t// loadI2L acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lwa_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Long - aligned
instruct loadL(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadL mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// long" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Long - aligned acquire.
instruct loadL_ac(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadL mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// long acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_ld_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Long - UNaligned
instruct loadL_unaligned(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadL_unaligned mem));
  // predicate(...) // Unaligned_ac is not needed (and wouldn't make sense).
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// unaligned long" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load nodes for superwords

// Load Aligned Packed Byte
instruct loadV8(iRegLdst dst, memoryAlg4 mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// load 8-byte Vector" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Range, range = array length (=jint)
instruct loadRange(iRegIdst dst, memory mem) %{
  match(Set dst (LoadRange mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// range" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Compressed Pointer
instruct loadN(iRegNdst dst, memory mem) %{
  match(Set dst (LoadN mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// load compressed ptr" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Compressed Pointer acquire.
instruct loadN_ac(iRegNdst dst, memory mem) %{
  match(Set dst (LoadN mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// load acquire compressed ptr\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lwz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Compressed Pointer and decode it if narrow_oop_shift == 0.
instruct loadN2P_unscaled(iRegPdst dst, memory mem) %{
  match(Set dst (DecodeN (LoadN mem)));
  // Only valid when decoding is a plain copy (shift == 0).
  predicate(_kids[0]->_leaf->as_Load()->is_unordered() && Universe::narrow_oop_shift() == 0);
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// DecodeN (unscaled)" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Pointer
instruct loadP(iRegPdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// ptr" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Pointer acquire.
instruct loadP_ac(iRegPdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadP mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// ptr acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_ld_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// LoadP + CastP2X
instruct loadP2X(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (CastP2X (LoadP mem)));
  predicate(_kids[0]->_leaf->as_Load()->is_unordered());
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// ptr + p2x" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load compressed klass pointer.
instruct loadNKlass(iRegNdst dst, memory mem) %{
  match(Set dst (LoadNKlass mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// compressed klass ptr" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

//// Load compressed klass and decode it if narrow_klass_shift == 0.
//// TODO: will narrow_klass_shift ever be 0?
//instruct decodeNKlass2Klass(iRegPdst dst, memory mem) %{
//  match(Set dst (DecodeNKlass (LoadNKlass mem)));
//  predicate(false /* TODO: PPC port Universe::narrow_klass_shift() == 0*/);
//  ins_cost(MEMORY_REF_COST);
//
//  format %{ "LWZ $dst, $mem \t// DecodeNKlass (unscaled)" %}
//  size(4);
//  ins_encode( enc_lwz(dst, mem) );
//  ins_pipe(pipe_class_memory);
//%}

// Load Klass Pointer
instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadKlass mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// klass ptr" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LFS $dst, $mem" %}
  size(4);
  ins_encode( enc_lfs(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Float acquire.
// Float acquire uses FCMPU/BNE instead of TWI (the loaded value is in an
// FP register), followed by ISYNC.
instruct loadF_ac(regF dst, memory mem) %{
  match(Set dst (LoadF mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LFS $dst, $mem \t// acquire\n\t"
            "FCMPU cr0, $dst, $dst\n\t"
            "BNE cr0, next\n"
            "next:\n\t"
            "ISYNC" %}
  size(16);
  ins_encode( enc_lfs_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Double - aligned
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LFD $dst, $mem" %}
  size(4);
  ins_encode( enc_lfd(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Double - aligned acquire.
instruct loadD_ac(regD dst, memory mem) %{
  match(Set dst (LoadD mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LFD $dst, $mem \t// acquire\n\t"
            "FCMPU cr0, $dst, $dst\n\t"
            "BNE cr0, next\n"
            "next:\n\t"
            "ISYNC" %}
  size(16);
  ins_encode( enc_lfd_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem) %{
  match(Set dst (LoadD_unaligned mem));
  // predicate(...) // Unaligned_ac is not needed (and wouldn't make sense).
  ins_cost(MEMORY_REF_COST);

  format %{ "LFD $dst, $mem" %}
  size(4);
  ins_encode( enc_lfd(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

//----------Constants--------------------------------------------------------

// Load MachConstantTableBase: add hi offset to global toc.
// TODO: Handle hidden register r29 in bundler!
instruct loadToc_hi(iRegLdst dst) %{
  effect(DEF dst);
  ins_cost(DEFAULT_COST);

  format %{ "ADDIS $dst, R29, DISP.hi \t// load TOC hi" %}
  size(4);
  ins_encode( enc_load_toc1(dst) );
  ins_pipe(pipe_class_default);
%}

// Load MachConstantTableBase: add lo offset to global toc.
instruct loadToc_lo(iRegLdst dst, iRegLdst src) %{
  effect(DEF dst, USE src);
  ins_cost(DEFAULT_COST);

  format %{ "ADDI $dst, $src, DISP.lo \t// load TOC lo" %}
  size(4);
  ins_encode( enc_load_toc2(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Load 16-bit integer constant 0xssss????
instruct loadConI16(iRegIdst dst, immI16 src) %{
  match(Set dst src);

  format %{ "LI $dst, $src" %}
  size(4);
  ins_encode( enc_li(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Load integer constant 0x????0000
instruct loadConIhi16(iRegIdst dst, immIhi16 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);

  format %{ "LIS $dst, $src.hi" %}
  size(4);
  ins_encode( enc_lis_32(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Part 2 of loading 32 bit constant: hi16 is src1 (properly shifted
// and sign extended), this adds the low 16 bits.
instruct loadConI32_lo16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "ORI $dst, $src1.hi, $src2.lo" %}
  size(4);
  ins_encode( enc_ori(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Load an arbitrary 32-bit int constant as LIS (hi) + ORI (lo).
instruct loadConI_Ex(iRegIdst dst, immI src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST*2);

  expand %{
    // Would like to use $src$$constant.
    immI16 srcLo %{ _opnds[1]->constant() %}
    // srcHi can be 0000 if srcLo sign-extends to a negative number.
    immIhi16 srcHi %{ _opnds[1]->constant() %}
    iRegIdst tmpI;
    loadConIhi16(tmpI, srcHi);
    loadConI32_lo16(dst, tmpI, srcLo);
  %}
%}

// No constant pool entries required.
instruct loadConL16(iRegLdst dst, immL16 src) %{
  match(Set dst src);

  format %{ "LI $dst, $src \t// long" %}
  size(4);
  ins_encode( enc_li(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Load long constant 0xssssssss????0000
instruct loadConL32hi16(iRegLdst dst, immL32hi16 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);

  format %{ "LIS $dst, $src.hi \t// long" %}
  size(4);
  ins_encode( enc_lis_32(dst, src) );
  ins_pipe(pipe_class_default);
%}

// To load a 32 bit constant: merge lower 16 bits into already loaded
// high 16 bits.
instruct loadConL32_lo16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "ORI $dst, $src1, $src2.lo" %}
  size(4);
  ins_encode( enc_ori(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Load 32-bit long constant
instruct loadConL32_Ex(iRegLdst dst, immL32 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST*2);

  expand %{
    // Would like to use $src$$constant.
    immL16 srcLo %{ _opnds[1]->constant() /*& 0x0000FFFFL */%}
    // srcHi can be 0000 if srcLo sign-extends to a negative number.
    immL32hi16 srcHi %{ _opnds[1]->constant() /*& 0xFFFF0000L */%}
    iRegLdst tmpL;
    loadConL32hi16(tmpL, srcHi);
    loadConL32_lo16(dst, tmpL, srcLo);
  %}
%}

// Load long constant 0x????000000000000.
instruct loadConLhighest16_Ex(iRegLdst dst, immLhighest16 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);

  expand %{
    immL32hi16 srcHi %{ _opnds[1]->constant() >> 32 /*& 0xFFFF0000L */%}
    immI shift32 %{ 32 %}
    iRegLdst tmpL;
    loadConL32hi16(tmpL, srcHi);
    lshiftL_regL_immI(dst, tmpL, shift32);
  %}
%}

// Expand node for constant pool load: small offset.
instruct loadConL(iRegLdst dst, immL src, iRegLdst toc, immI isoop) %{
  effect(DEF dst, USE src, USE toc, USE isoop);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);
  // Needed so that CallDynamicJavaDirect can compute the address of this
  // instruction for relocation.
  ins_field_cbuf_insts_offset(int);

  format %{ "LD $dst, offset, $toc \t// load long(isoop=$isoop) $src from TOC" %}
  size(4);
  ins_encode( enc_load_long_constL(dst, src, toc, isoop) );
  ins_pipe(pipe_class_memory);
%}

// Expand node for constant pool load: large offset.
instruct loadConL_hi(iRegLdst dst, immL src, iRegLdst toc, immI isoop) %{
  effect(DEF dst, USE src, USE toc, USE isoop);
  predicate(false);

  ins_num_consts(1);
  ins_field_const_toc_offset(int);
  // Needed so that CallDynamicJavaDirect can compute the address of this
  // instruction for relocation.
  ins_field_cbuf_insts_offset(int);

  format %{ "ADDIS $dst, $toc, offset \t// load long(isoop=$isoop) $src from TOC (hi)" %}
  size(4);
  ins_encode( enc_load_long_constL_hi(dst, toc, src, isoop) );
  ins_pipe(pipe_class_default);
%}

// Expand node for constant pool load: large offset.
// Paired with loadConL_hi via the const_toc_offset_hi_node field.
instruct loadConL_lo(iRegLdst dst, immL src, iRegLdst base, immI isoop) %{
  effect(DEF dst, USE src, USE base, USE isoop);
  predicate(false);

  ins_field_const_toc_offset_hi_node(loadConL_hiNode*);

  format %{ "LD $dst, offset, $base \t// load long(isoop=$isoop) $src from TOC (lo)" %}
  size(4);
  ins_encode( enc_load_long_const_lo(dst, src, base, isoop) );
  ins_pipe(pipe_class_memory);
%}

// Load long constant from constant table. Expand in case of
// offset > 16 bit is needed.
// Adlc adds toc node MachConstantTableBase.
instruct loadConL_Ex(iRegLdst dst, immL src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, offset, $constanttablebase\t// load long $src from table, late expanded" %}
  lateExpand( lateExpand_load_long_constant(dst, src, constanttablebase) );
%}

// Load NULL as compressed oop.
instruct loadConN0(iRegNdst dst, immN_0 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);

  format %{ "LI $dst, $src \t// compressed ptr" %}
  size(4);
  ins_encode( enc_li(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Load hi part of compressed oop constant.
instruct loadConN1(iRegNdst dst, immN src) %{
  effect(DEF dst, USE src);
  ins_cost(DEFAULT_COST);

  format %{ "LIS $dst, $src \t// narrow oop hi" %}
  size(4);
  ins_encode( enc_load_con_narrow1(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Add lo part of compressed oop constant to already loaded hi part.
instruct loadConN2(iRegNdst dst, iRegNsrc src1, immN src2) %{
  effect(DEF dst, USE src1, USE src2);
  ins_cost(DEFAULT_COST);

  format %{ "ADDI $dst, $src1, $src2 \t// narrow oop lo" %}
  size(4);
  ins_encode( enc_load_con_narrow2(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Needed to late expand loadConN: ConN is loaded as ConI
// leaving the upper 32 bits with sign-extension bits.
// This clears these bits: dst = src & 0xFFFFFFFF.
// TODO: Eventually call this maskN_regN_FFFFFFFF.
instruct clearMs32b(iRegNdst dst, iRegNsrc src) %{
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "MASK $dst, $src, 0xFFFFFFFF" %} // mask
  size(4);
  // CLRLDI dst, src, 32: clear the most-significant 32 bits.
  ins_encode( enc_clrldi(dst, src, 0x20) );
  ins_pipe(pipe_class_default);
%}

// Loading ConN must be late expanded so that edges between
// the nodes are safe. They may not interfere with a safepoint.
// GL TODO: This needs three instructions: better put this into the constant pool.
instruct loadConN_Ex(iRegNdst dst, immN src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST*2);

  format %{ "LoadN $dst, $src \t// late expanded" %} // mask
  lateExpand( lateExpand_load_conN(dst, src) );
%}

// 0x1 is used in object initialization (initial object header).
// No constant pool entries required.
instruct loadConP0or1(iRegPdst dst, immP_0or1 src) %{
  match(Set dst src);

  format %{ "LI $dst, $src \t// ptr" %}
  size(4);
  ins_encode( enc_li(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Expand node for constant pool load: small offset.
// The match rule is needed to generate the correct bottom_type(),
// however this node should never match. The use of predicate is not
// possible since ADLC forbids predicates for chain rules. The higher
// costs do not prevent matching in this case. For that reason the
// operand immP_NM with predicate(false) is used.
instruct loadConP(iRegPdst dst, immP_NM src, iRegLdst toc, immI isoop) %{
  match(Set dst src);
  effect(TEMP toc, USE isoop);

  ins_num_consts(1);

  format %{ "LD $dst, offset, $toc \t// load ptr(isoop=$isoop) $src from TOC" %}
  size(4);
  ins_encode( enc_load_long_constP(dst, src, toc, isoop) );
  ins_pipe(pipe_class_memory);
%}

// Expand node for constant pool load: large offset.
instruct loadConP_hi(iRegPdst dst, immP_NM src, iRegLdst toc, immI isoop) %{
  effect(DEF dst, USE src, USE toc, USE isoop);
  predicate(false);

  ins_num_consts(1);
  ins_field_const_toc_offset(int);

  format %{ "ADDIS $dst, $toc, offset \t// load ptr(isoop=$isoop) $src from TOC (hi)" %}
  size(4);
  ins_encode( enc_load_long_constP_hi(dst, toc, src, isoop) );
  ins_pipe(pipe_class_default);
%}

// Expand node for constant pool load: large offset.
// Paired with loadConP_hi via the const_toc_offset_hi_node field.
instruct loadConP_lo(iRegPdst dst, immP_NM src, iRegLdst base, immI isoop) %{
  match(Set dst src);
  effect(TEMP base, USE isoop);

  ins_field_const_toc_offset_hi_node(loadConP_hiNode*);

  format %{ "LD $dst, offset, $base \t// load ptr(isoop=$isoop) $src from TOC (lo)" %}
  size(4);
  ins_encode( enc_load_long_const_lo(dst, src, base, isoop) );
  ins_pipe(pipe_class_memory);
%}

// Load pointer constant from constant table. Expand in case an
// offset > 16 bit is needed.
// Adlc adds toc node MachConstantTableBase.
instruct loadConP_Ex(iRegPdst dst, immP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  // This rule does not use "expand" because then
  // the result type is not known to be an Oop. An ADLC
  // enhancement will be needed to make that work - not worth it!

  // If this instruction rematerializes, it prolongs the live range
  // of the toc node, causing illegal graphs.
  // assert(edge_from_to(_reg_node[reg_lo],def)) fails in verify_good_schedule().
  ins_cannot_rematerialize(true);

  format %{ "LD $dst, offset, $constanttablebase \t// load ptr $src from table, late expanded" %}
  lateExpand( lateExpand_load_ptr_constant(dst, src, constanttablebase) );
%}

// Expand node for constant pool load: small offset.
instruct loadConF(regF dst, immF src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);

  format %{ "LFS $dst, offset, $toc \t// load float $src from TOC" %}
  size(4);
  ins_encode( enc_load_float_const(dst, src, toc) );
  ins_pipe(pipe_class_memory);
%}

// Expand node for constant pool load: large offset.
// Temporarily biases the toc register by offset_hi and restores it afterwards.
instruct loadConFComp(regF dst, immF src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);

  format %{ "ADDIS $toc, $toc, offset_hi\n\t"
            "LFS $dst, offset_lo, $toc \t// load float $src from TOC (hi/lo)\n\t"
            "ADDIS $toc, $toc, -offset_hi"%}
  size(12);
  ins_encode( enc_load_float_const_comp(dst, src, toc) );
  ins_pipe(pipe_class_memory);
%}

// Adlc adds toc node MachConstantTableBase.
instruct loadConF_Ex(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  // See loadConP.
  ins_cannot_rematerialize(true);

  format %{ "LFS $dst, offset, $constanttablebase \t// load $src from table, late expanded" %}
  lateExpand( lateExpand_load_float_constant(dst, src, constanttablebase) );
%}

// Expand node for constant pool load: small offset.
instruct loadConD(regD dst, immD src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);

  format %{ "LFD $dst, offset, $toc \t// load double $src from TOC" %}
  size(4);
  ins_encode( enc_load_double_const(dst, src, toc) );
  ins_pipe(pipe_class_memory);
%}

// Expand node for constant pool load: large offset.
instruct loadConDComp(regD dst, immD src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);

  format %{ "ADDIS $toc, $toc, offset_hi\n\t"
            "LFD $dst, offset_lo, $toc \t// load double $src from TOC (hi/lo)\n\t"
            "ADDIS $toc, $toc, -offset_hi" %}
  size(12);
  ins_encode( enc_load_double_const_comp(dst, src, toc) );
  ins_pipe(pipe_class_memory);
%}

// Adlc adds toc node MachConstantTableBase.
instruct loadConD_Ex(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  // See loadConP.
  ins_cannot_rematerialize(true);

  format %{ "ConD $dst, offset, $constanttablebase \t// load $src from table, late expanded" %}
  lateExpand( lateExpand_load_double_constant(dst, src, constanttablebase) );
%}

// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).

instruct prefetchr(indirectMemory mem, iRegLsrc src) %{
  match(PrefetchRead (AddP mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 0, $src \t// Prefetch read-many" %}
  size(4);
  ins_encode( enc_mem_load_prefetch(mem, src) );
  ins_pipe(pipe_class_memory);
%}

instruct prefetchr_no_offset(indirectMemory mem) %{
  match(PrefetchRead mem);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem" %}
  size(4);
  ins_encode( enc_mem_load_prefetch_no_offset(mem) );
  ins_pipe(pipe_class_memory);
%}

instruct prefetchw(indirectMemory mem, iRegLsrc src) %{
  match(PrefetchWrite (AddP mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many (and read)" %}
  size(4);
  ins_encode( enc_mem_store_prefetch(mem, src) );
  ins_pipe(pipe_class_memory);
%}

instruct prefetchw_no_offset(indirectMemory mem) %{
  match(PrefetchWrite mem);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem" %}
  size(4);
  ins_encode( enc_mem_store_prefetch_no_offset(mem) );
  ins_pipe(pipe_class_memory);
%}

// Special prefetch versions which use the dcbz instruction.
instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
  match(PrefetchAllocation (AddP mem src));
  predicate(AllocatePrefetchStyle==3);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
  size(4);
  ins_encode( enc_mem_store_prefetch_zero(mem, src) );
  ins_pipe(pipe_class_memory);
%}

instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
  match(PrefetchAllocation mem);
  predicate(AllocatePrefetchStyle==3);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
  size(4);
  ins_encode( enc_mem_store_prefetch_zero_no_offset(mem) );
  ins_pipe(pipe_class_memory);
%}

instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
  match(PrefetchAllocation (AddP mem src));
  predicate(AllocatePrefetchStyle!=3);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
  size(4);
  ins_encode( enc_mem_store_prefetch(mem, src) );
  ins_pipe(pipe_class_memory);
%}

instruct prefetch_alloc_no_offset(indirectMemory mem) %{
  match(PrefetchAllocation mem);
  predicate(AllocatePrefetchStyle!=3);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}
  size(4);
  ins_encode( enc_mem_store_prefetch_no_offset(mem) );
  ins_pipe(pipe_class_memory);
%}

//----------Store Instructions-------------------------------------------------

// Store Byte
instruct storeB(memory mem, iRegIsrc src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STB $src, $mem \t// byte" %}
  size(4);
  ins_encode( enc_stb(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Char/Short
instruct storeC(memory mem, iRegIsrc src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STH $src, $mem \t// short" %}
  size(4);
  ins_encode( enc_sth(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Integer
instruct storeI(memory mem, iRegIsrc src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src, $mem" %}
  size(4);
  ins_encode( enc_stw(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// ConvL2I + StoreI.
// The L2I conversion is free here: STW stores the low 32 bits anyway.
instruct storeI_convL2I(memory mem, iRegLsrc src) %{
  match(Set mem (StoreI mem (ConvL2I src)));
  ins_cost(MEMORY_REF_COST);

  format %{ "STW l2i($src), $mem" %}
  size(4);
  ins_encode( enc_stw(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Long
instruct storeL(memoryAlg4 mem, iRegLsrc src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STD $src, $mem \t// long" %}
  size(4);
  ins_encode( enc_std(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store super word nodes.
// Store Aligned Packed Byte long register to memory
instruct storeA8B(memoryAlg4 mem, iRegLsrc src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);

  // NOTE(review): format lists operands as "$mem, $src" while the other
  // store formats use "$src, $mem"; the emitted code (enc_std) is the same.
  format %{ "STD $mem, $src \t// packed8B" %}
  size(4);
  ins_encode( enc_std(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Compressed Oop
instruct storeN(memory dst, iRegN_P2N src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src, $dst \t// compressed oop" %}
  size(4);
  ins_encode( enc_stw(src, dst) );
  ins_pipe(pipe_class_memory);
%}

// Store Pointer
instruct storeP(memoryAlg4 dst, iRegPsrc src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STD $src, $dst \t// ptr" %}
  size(4);
  ins_encode( enc_std(src, dst) );
  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF(memory mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STFS $src, $mem" %}
  size(4);
  ins_encode( enc_stfs(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Double
instruct storeD(memory mem, regD src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STFD $src, $mem" %}
  size(4);
  ins_encode( enc_stfd(src, mem) );
  ins_pipe(pipe_class_memory);
%}

//----------Store Instructions With Zeros--------------------------------------

// Card-mark for CMS garbage collection.
// This cardmark does an optimization so that it must not always
// do a releasing store. For this, it gets the address of
// CMSCollectorCardTableModRefBSExt::_requires_release as input.
// (Using releaseFieldAddr in the match rule is a hack.)
instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr) %{
  match(Set mem (StoreCM mem releaseFieldAddr));
  predicate(false);  // Only used via expand in storeCM_CMS_ExEx below.
  ins_cost(MEMORY_REF_COST);

  // See loadConP.
  ins_cannot_rematerialize(true);

  format %{ "STB #0, $mem \t// CMS card-mark byte (must be 0!), checking requires_release in [$releaseFieldAddr]" %}
  ins_encode( enc_cms_card_mark(mem, releaseFieldAddr) );
  ins_pipe(pipe_class_memory);
%}

// Card-mark for CMS garbage collection.
// This cardmark does an optimization so that it must not always
// do a releasing store. For this, it needs the constant address of
// CMSCollectorCardTableModRefBSExt::_requires_release.
// This constant address is split off here by expand so we can use
// adlc / matcher functionality to load it from the constant section.
instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{
  match(Set mem (StoreCM mem zero));
  predicate(UseConcMarkSweepGC);

  expand %{
    immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableModRefBSExt::requires_release_address() */ %}
    iRegLdst releaseFieldAddress;
    loadConL_Ex(releaseFieldAddress, baseImm);
    storeCM_CMS(mem, releaseFieldAddress);
  %}
%}

// Card-mark byte store used by G1; plain (non-releasing) zero store.
instruct storeCM_G1(memory mem, immI_0 zero) %{
  match(Set mem (StoreCM mem zero));
  predicate(UseG1GC);
  ins_cost(MEMORY_REF_COST);

  ins_cannot_rematerialize(true);

  format %{ "STB #0, $mem \t// CMS card-mark byte store (G1)" %}
  size(8);  // li(0) + stb — two instructions.
  ins_encode( enc_stb0(mem) );
  ins_pipe(pipe_class_memory);
%}

// Convert oop pointer into compressed form.

// Nodes for late expand.

// Shift node for expand.
instruct encodeP_shift(iRegNdst dst, iRegNsrc src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodeP src));
  predicate(false);  // Only used as a late-expand helper.

  // NOTE(review): format hardcodes shift amount 3; the encoder uses
  // Universe::narrow_oop_shift() — confirm they agree in all configurations.
  format %{ "SRDI $dst, $src, 3 \t// encode" %}
  size(4);
  ins_encode( enc_srdi(dst, src, (Universe::narrow_oop_shift())) );
  ins_pipe(pipe_class_default);
%}

// Add node for expand.
instruct encodeP_sub(iRegPdst dst, iRegPdst src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodeP src));
  predicate(false);  // Only used as a late-expand helper.

  // R30 holds the narrow-oop base (subf: dst = src - R30).
  format %{ "SUB $dst, $src, oop_base \t// encode" %}
  size(4);
  ins_encode( enc_subf(dst, R30, src) );
  ins_pipe(pipe_class_default);
%}

// Conditional sub base: subtract the heap base only for non-NULL oops,
// so that NULL encodes to 0.
instruct cond_sub_base(iRegNdst dst, flagsReg crx, iRegPsrc src1) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodeP (Binary crx src1)));
  predicate(false);  // Only used as a late-expand helper.

  ins_variable_size_depending_on_alignment(true);

  format %{ "BEQ $crx, done\n\t"
            "SUB $dst, $src1, R30 \t// encode: subtract base if != NULL\n"
            "done:" %}
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cond_sub_base(dst,crx, src1) );
  ins_pipe(pipe_class_default);
%}

// Power 7 can use isel instruction
instruct cond_set_0_oop(iRegNdst dst, flagsReg crx, iRegPsrc src1) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodeP (Binary crx src1)));
  predicate(false);  // Only used as a late-expand helper.

  format %{ "CMOVE $dst, $crx eq, 0, $src1 \t// encode: preserve 0" %}
  size(4);
  ins_encode( enc_isel_set_to_0_or_value(dst, crx, src1) );
  ins_pipe(pipe_class_default);
%}

// base != 0
// 32G aligned narrow oop base.
// Encode with a base that is disjoint from the compressed-oop bit range
// (32G aligned): a single rotate-and-extract does shift and base at once.
instruct encodeP_32GAligned(iRegNdst dst, flagsReg crx, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(false /* TODO: PPC port Universe::narrow_oop_base_disjoint()*/);

  format %{ "EXTRDI $dst, $src, #32, #3 \t// encode with 32G aligned base" %}
  size(4);
  ins_encode( enc_extrdi_encode(dst, src) );
  ins_pipe(pipe_class_default);
%}

// shift != 0, base != 0
// Oop may be NULL, so the NULL check is needed; expanded late into the
// conditional helper nodes above. Kills crx.
instruct encodeP_Ex(iRegNdst dst, flagsReg crx, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  effect(TEMP crx);
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull &&
            Universe::narrow_oop_shift() != 0 &&
            true /* TODO: PPC port Universe::narrow_oop_base_overlaps()*/);

  format %{ "EncodeP $dst, $crx, $src \t// late expanded" %}
  lateExpand( lateExpand_encode_oop(dst, src, crx));
%}

// shift != 0, base != 0
// Oop is known non-NULL, so no flag register is needed.
instruct encodeP_not_null_Ex(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull &&
            Universe::narrow_oop_shift() != 0 &&
            true /* TODO: PPC port Universe::narrow_oop_base_overlaps()*/);

  format %{ "EncodeP $dst, $src\t// $src != Null, late expanded" %}
  lateExpand( lateExpand_encode_oop_not_null(dst, src) );
%}

// shift != 0, base == 0
// Zero-based compressed oops: encoding is just a right shift.
instruct encodeP_not_null_base_null(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(Universe::narrow_oop_shift() != 0 &&
            Universe::narrow_oop_base() ==0);

  // NOTE(review): format hardcodes #3; encoder uses Universe::narrow_oop_shift().
  format %{ "SRDI $dst, $src, #3 \t// encodeP, $src != NULL" %}
  size(4);
  ins_encode( enc_srdi(dst, src, (Universe::narrow_oop_shift())) );
  ins_pipe(pipe_class_default);
%}

// Compressed OOPs with narrow_oop_shift == 0.
// shift == 0, base == 0
// Encoding is a plain register move (elided if dst == src).
instruct encodeP_narrow_oop_shift_0(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(Universe::narrow_oop_shift() == 0);

  format %{ "MR $dst, $src \t// Ptr->Narrow" %}
  // variable size, 0 or 4.
  ins_encode( enc_mr_if_needed(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Decode nodes.

// Shift node for expand.
instruct decodeN_shift(iRegPdst dst, iRegPsrc src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (DecodeN src));
  predicate(false);  // Only used as a late-expand helper.

  // NOTE(review): format hardcodes #3; encoder uses Universe::narrow_oop_shift().
  format %{ "SLDI $dst, $src, #3 \t// DecodeN" %}
  size(4);
  ins_encode( enc_sldi(dst, src, (Universe::narrow_oop_shift())) );
  ins_pipe(pipe_class_default);
%}

// Add node for expand.
instruct decodeN_add(iRegPdst dst, iRegPdst src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (DecodeN src));
  predicate(false);  // Only used as a late-expand helper.

  // R30 holds the narrow-oop base.
  format %{ "ADD $dst, $src, R30 \t// DecodeN, add oop base" %}
  size(4);
  ins_encode( enc_add(dst, src, R30) );
  ins_pipe(pipe_class_default);
%}

// Conditional add base for expand: add the heap base only for non-NULL
// narrow oops, so that 0 decodes to NULL.
instruct cond_add_base(iRegPdst dst, flagsReg crx, iRegPsrc src1) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  // NOTICE that the rule is nonsense - we just have to make sure that:
  // - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
  // - we have to match 'crx' to avoid an "illegal USE of non-input: flagsReg crx" error in ADLC.
  match(Set dst (DecodeN (Binary crx src1)));
  predicate(false);  // Only used as a late-expand helper.

  ins_variable_size_depending_on_alignment(true);

  format %{ "BEQ $crx, done\n\t"
            "ADD $dst, $src1, R30 \t// DecodeN: add oop base if $src1 != NULL\n"
            "done:" %}
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cond_add_base(dst, crx, src1) );
  ins_pipe(pipe_class_default);
%}

// isel-based variant: dst = (crx eq) ? 0 : src1.
instruct cond_set_0_ptr(iRegPdst dst, flagsReg crx, iRegPsrc src1) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  // NOTICE that the rule is nonsense - we just have to make sure that:
  // - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
  // - we have to match 'crx' to avoid an "illegal USE of non-input: flagsReg crx" error in ADLC.
  match(Set dst (DecodeN (Binary crx src1)));
  predicate(false);  // Only used as a late-expand helper.

  format %{ "CMOVE $dst, $crx eq, 0, $src1 \t// decode: preserve 0" %}
  size(4);
  ins_encode( enc_isel_set_to_0_or_value(dst, crx, src1) );
  ins_pipe(pipe_class_default);
%}

// shift != 0, base != 0
// Narrow oop may be 0 (NULL), so decode must preserve 0. Kills crx.
instruct decodeN_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
  match(Set dst (DecodeN src));
  predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
             n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
            Universe::narrow_oop_shift() != 0 &&
            Universe::narrow_oop_base() != 0);
  effect(TEMP crx);

  format %{ "DecodeN $dst, $src \t// Kills $crx, late expanded" %}
  lateExpand( lateExpand_decode_oop(dst, src, crx) );
%}

// shift != 0, base == 0
// Zero-based compressed oops: decoding is just a left shift.
instruct decodeN_nullBase(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeN src));
  predicate(Universe::narrow_oop_shift() != 0 &&
            Universe::narrow_oop_base() == 0);

  // NOTE(review): format hardcodes #3; encoder uses Universe::narrow_oop_shift().
  format %{ "SLDI $dst, $src, #3 \t// DecodeN (zerobased)" %}
  size(4);
  ins_encode( enc_sldi(dst, src, (Universe::narrow_oop_shift())));
  ins_pipe(pipe_class_default);
%}

// src != 0, shift != 0, base != 0
// Narrow oop known non-NULL: unconditional shift + add base, no flags needed.
instruct decodeN_notNull_addBase_Ex(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeN src));
  predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
             n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
            Universe::narrow_oop_shift() != 0 &&
            Universe::narrow_oop_base() != 0);

  format %{ "DecodeN $dst, $src \t// $src != NULL, late expanded" %}
  lateExpand( lateExpand_decode_oop_not_null(dst, src));
%}

// Compressed OOPs with narrow_oop_shift == 0.
instruct decodeN_unscaled(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeN src));
  predicate(Universe::narrow_oop_shift() == 0);
  ins_cost(DEFAULT_COST);

  format %{ "MR $dst, $src \t// DecodeN (unscaled)" %}
  // variable size, 0 or 4.
  ins_encode( enc_mr_if_needed(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Convert compressed oop into int for vectors alignment masking.
instruct decodeN2I_unscaled(iRegIdst dst, iRegNsrc src) %{
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));
  predicate(Universe::narrow_oop_shift() == 0);
  ins_cost(DEFAULT_COST);

  format %{ "MR $dst, $src \t// (int)DecodeN (unscaled)" %}
  // variable size, 0 or 4.
  ins_encode( enc_mr_if_needed(dst, src) );
  ins_pipe(pipe_class_default);
%}

//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors

// We don't need this as we do load.acq.
//instruct membar_acquire() %{
//  match(MemBarAcquire);
//  ins_cost(4*MEMORY_REF_COST);
//
//  format %{ "MEMBAR-acquire" %}
//  size(4);
//  ins_encode( enc_membar_acquire );
//  ins_pipe(pipe_class_default);
//%}

// MemBarAcquire is redundant here: acquire semantics come from the
// preceding load.acq (see commented-out membar_acquire above).
instruct unnecessary_membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  format %{ " -- \t// redundant MEMBAR-acquire - empty" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(0);

  format %{ " -- \t// redundant MEMBAR-acquire - empty (acquire as part of CAS in prior FastLock)" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(4*MEMORY_REF_COST);

  format %{ "MEMBAR-release" %}
  size(4);
  ins_encode( enc_membar_release );
  ins_pipe(pipe_class_default);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(0);

  format %{ " -- \t// redundant MEMBAR-release - empty (release in FastUnlock)" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(4*MEMORY_REF_COST);

  format %{ "MEMBAR-volatile" %}
  size(4);
  ins_encode( enc_membar_volatile );
  ins_pipe(pipe_class_default);
%}

// This optimization is wrong on PPC. The following pattern is not supported:
//  MemBarVolatile
//   ^        ^
//   |        |
// CtrlProj MemProj
//   ^        ^
//   |        |
//   |       Load
//   |
//  MemBarVolatile
//
// The first MemBarVolatile could get optimized out! According to
// Vladimir, this pattern can not occur on Oracle platforms.
// However, it does occur on PPC64 (because of membars in
// inline_unsafe_load_store).
//
// Add this node again if we found a good solution for inline_unsafe_load_store().
// Don't forget to look at the implementation of post_store_load_barrier again,
// we did other fixes in that method.
//instruct unnecessary_membar_volatile() %{
//  match(MemBarVolatile);
//  predicate(Matcher::post_store_load_barrier(n));
//  ins_cost(0);
//
//  format %{ " -- \t// redundant MEMBAR-volatile - empty" %}
//  size(0);
//  ins_encode( /*empty*/ );
//  ins_pipe(pipe_class_default);
//%}

instruct membar_CPUOrder() %{
  match(MemBarCPUOrder);
  ins_cost(0);

  format %{ " -- \t// MEMBAR-CPUOrder - empty: PPC64 processors are self-consistent." %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

// New MembarNode in HS23: Replaces lwsync in InitializeNode
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(4*MEMORY_REF_COST);

  format %{ "MEMBAR-store-store" %}
  size(4);
  ins_encode( enc_membar_release );
  ins_pipe(pipe_class_default);
%}

//----------Conditional Move---------------------------------------------------

// Cmove using isel.
instruct cmovI_reg_isel(cmpOp cmp, flagsReg crx, iRegIdst dst, iRegIsrc src) %{
  match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
  predicate(VM_Version::has_isel());
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  size(4);
  ins_encode( enc_isel(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Branch-based cmove for processors without isel.
instruct cmovI_reg(cmpOp cmp, flagsReg crx, iRegIdst dst, iRegIsrc src) %{
  match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
  predicate(!VM_Version::has_isel());
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Conditional move with a 16-bit immediate source.
instruct cmovI_imm(cmpOp cmp, flagsReg crx, iRegIdst dst, immI16 src) %{
  match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Cmove using isel.
instruct cmovL_reg_isel(cmpOp cmp, flagsReg crx, iRegLdst dst, iRegLsrc src) %{
  match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
  predicate(VM_Version::has_isel());
  ins_cost(DEFAULT_COST);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  size(4);
  ins_encode( enc_isel(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

instruct cmovL_reg(cmpOp cmp, flagsReg crx, iRegLdst dst, iRegLsrc src) %{
  match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
  predicate(!VM_Version::has_isel());
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

instruct cmovL_imm(cmpOp cmp, flagsReg crx, iRegLdst dst, immL16 src) %{
  match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Cmove using isel.
instruct cmovN_reg_isel(cmpOp cmp, flagsReg crx, iRegNdst dst, iRegNsrc src) %{
  match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
  predicate(VM_Version::has_isel());
  ins_cost(DEFAULT_COST);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  size(4);
  ins_encode( enc_isel(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Conditional move for RegN. Only cmov(reg, reg).
instruct cmovN_reg(cmpOp cmp, flagsReg crx, iRegNdst dst, iRegNsrc src) %{
  match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
  predicate(!VM_Version::has_isel());
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Conditional move with narrow-oop zero immediate.
instruct cmovN_imm(cmpOp cmp, flagsReg crx, iRegNdst dst, immN_0 src) %{
  match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Cmove using isel.
instruct cmovP_reg_isel(cmpOp cmp, flagsReg crx, iRegPdst dst, iRegPsrc src) %{
  match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
  predicate(VM_Version::has_isel());
  ins_cost(DEFAULT_COST);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  size(4);
  ins_encode( enc_isel(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

instruct cmovP_reg(cmpOp cmp, flagsReg crx, iRegPdst dst, iRegP_N2P src) %{
  match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
  predicate(!VM_Version::has_isel());
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Conditional move with pointer zero immediate.
instruct cmovP_imm(cmpOp cmp, flagsReg crx, iRegPdst dst, immP_0 src) %{
  match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Float conditional move; always branch-based (no isel for FP registers).
instruct cmovF_reg(cmpOp cmp, flagsReg crx, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmovef_reg(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

// Double conditional move; always branch-based (no isel for FP registers).
instruct cmovD_reg(cmpOp cmp, flagsReg crx, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmovef_reg(dst, crx, src, cmp) );
  ins_pipe(pipe_class_default);
%}

//----------Conditional_store--------------------------------------------------
// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flags (EQ) on success. Implemented with a CASA on Sparc.

// As compareAndSwapL, but return flag register instead of boolean value in
// int register.
// Used by sun/misc/AtomicLongCSImpl.java.
// Mem_ptr must be a memory operand, else this node does not get
// Flag_needs_anti_dependence_check set by adlc. If this is not set this node
// can be rematerialized which leads to errors.
instruct storeLConditional_regP_regL_regL(flagsReg crx, indirect mem_ptr, iRegLsrc oldVal, iRegLsrc newVal) %{
  match(Set crx (StoreLConditional mem_ptr (Binary oldVal newVal)));
  format %{ "CMPXCHGD if ($crx = ($oldVal == *$mem_ptr)) *mem_ptr = $newVal; as bool" %}
  ins_encode( enc_storeConditionalLP(crx, mem_ptr, oldVal, newVal) );
  ins_pipe(pipe_class_default);
%}

// As compareAndSwapP, but return flag register instead of boolean value in
// int register.
// This instruction is matched if UseTLAB is off.
// Mem_ptr must be a memory operand, else this node does not get
// Flag_needs_anti_dependence_check set by adlc. If this is not set this node
// can be rematerialized which leads to errors.
instruct storePConditional_regP_regP_regP(flagsReg crx, indirect mem_ptr, iRegPsrc oldVal, iRegPsrc newVal) %{
  match(Set crx (StorePConditional mem_ptr (Binary oldVal newVal)));
  format %{ "CMPXCHGD if ($crx = ($oldVal == *$mem_ptr)) *mem_ptr = $newVal; as bool" %}
  ins_encode( enc_storeConditionalLP(crx, mem_ptr, oldVal, newVal) );
  ins_pipe(pipe_class_default);
%}

// Implement LoadPLocked. Must be ordered against changes of the memory location
// by storePConditional.
// Don't know whether this is ever used.
instruct loadPLocked(iRegPdst dst, memory mem) %{
  match(Set dst (LoadPLocked mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// loadPLocked\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_ld_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

//----------Compare-And-Swap---------------------------------------------------

// CompareAndSwap{P,I,L} have more than one output, therefore "CmpI
// (CompareAndSwap ...)" or "If (CmpI (CompareAndSwap ..))" cannot be
// matched.

instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary src1 src2)));
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_compareAndSwapI(res, mem_ptr, src1, src2) );
  ins_pipe(pipe_class_default);
%}

instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary src1 src2)));
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_compareAndSwapN(res, mem_ptr, src1, src2) );
  ins_pipe(pipe_class_default);
%}

instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2) %{
  match(Set res (CompareAndSwapL mem_ptr (Binary src1 src2)));
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_compareAndSwapLP(res, mem_ptr, src1, src2) );
  ins_pipe(pipe_class_default);
%}

instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary src1 src2)));
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_compareAndSwapLP(res, mem_ptr, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Atomic fetch-and-add, 32-bit.
instruct getAndAddI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
  match(Set res (GetAndAddI mem_ptr src));
  format %{ "GetAndAddI $res, $mem_ptr, $src" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_GetAndAddI(res, mem_ptr, src) );
  ins_pipe(pipe_class_default);
%}

// Atomic fetch-and-add, 64-bit.
instruct getAndAddL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
  match(Set res (GetAndAddL mem_ptr src));
  format %{ "GetAndAddL $res, $mem_ptr, $src" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_GetAndAddL(res, mem_ptr, src) );
  ins_pipe(pipe_class_default);
%}

// Atomic exchange, 32-bit.
instruct getAndSetI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
  match(Set res (GetAndSetI mem_ptr src));
  format %{ "GetAndSetI $res, $mem_ptr, $src" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_GetAndSetI(res, mem_ptr, src) );
  ins_pipe(pipe_class_default);
%}

// Atomic exchange, 64-bit.
instruct getAndSetL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
  match(Set res (GetAndSetL mem_ptr src));
  format %{ "GetAndSetL $res, $mem_ptr, $src" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_GetAndSetL(res, mem_ptr, src) );
  ins_pipe(pipe_class_default);
%}

// Atomic exchange of a pointer (64-bit exchange).
instruct getAndSetP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src) %{
  match(Set res (GetAndSetP mem_ptr src));
  format %{ "GetAndSetP $res, $mem_ptr, $src" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_GetAndSetL(res, mem_ptr, src) );
  ins_pipe(pipe_class_default);
%}

// Atomic exchange of a compressed oop (32-bit exchange).
instruct getAndSetN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src) %{
  match(Set res (GetAndSetN mem_ptr src));
  format %{ "GetAndSetN $res, $mem_ptr, $src" %}
  // Variable size: instruction count smaller if regs are disjoint.
  ins_encode( enc_GetAndSetI(res, mem_ptr, src) );
  ins_pipe(pipe_class_default);
%}

//----------Arithmetic Instructions--------------------------------------------
// Addition Instructions

// PPC has no instruction setting overflow of 32-bit integer.
//instruct addExactI_rReg(rarg4RegI dst, rRegI src, flagsReg cr) %{
//  match(AddExactI dst src);
//  effect(DEF cr);
//
//  format %{ "ADD $dst, $dst, $src \t// addExact int, sets $cr" %}
//  ins_encode( enc_add(dst, dst, src) );
//  ins_pipe(pipe_class_default);
//%}

// Register Addition
instruct addI_reg_reg(iRegIdst dst, iRegIsrc_iRegL2Isrc src1, iRegIsrc_iRegL2Isrc src2) %{
  match(Set dst (AddI src1 src2));
  format %{ "ADD $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_add(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Expand does not work with above instruct. (??)
// Integer register-register add without a match rule; exists only so
// expand rules below can emit a second ADD (adlc rejects reusing
// addI_reg_reg inside its own expand).
instruct addI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule
  effect(DEF dst, USE src1, USE src2);
  format %{ "ADD $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_add(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Fuse a chain of three integer adds into a balanced tree of ADDs to
// shorten the dependency chain: (s1+s2) and (s3+s4) can issue in parallel.
instruct tree_addI_addI_addI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
  match(Set dst (AddI (AddI (AddI src1 src2) src3) src4));
  ins_cost(DEFAULT_COST*3);

  expand %{
    // FIXME: we should do this in the ideal world.
    iRegIdst tmp1;
    iRegIdst tmp2;
    addI_reg_reg(tmp1, src1, src2);
    addI_reg_reg_2(tmp2, src3, src4); // Adlc complains about addI_reg_reg.
    addI_reg_reg(dst, tmp1, tmp2);
  %}
%}

// Immediate Addition
instruct addI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
  match(Set dst (AddI src1 src2));
  format %{ "ADDI $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_addi(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate Addition with 16-bit shifted operand
instruct addI_reg_immhi16(iRegIdst dst, iRegIsrc src1, immIhi16 src2) %{
  match(Set dst (AddI src1 src2));
  format %{ "ADDIS $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_addis(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Long Addition
instruct addL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (AddL src1 src2));
  format %{ "ADD $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode( enc_add(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Expand does not work with above instruct. (??)
// Long register-register add without a match rule; exists only so the
// expand rule below can emit a second ADD (adlc rejects reusing
// addL_reg_reg inside its own expand).
instruct addL_reg_reg_2(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  // no match-rule
  effect(DEF dst, USE src1, USE src2);
  format %{ "ADD $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode( enc_add(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Fuse a chain of three long adds into a balanced tree of ADDs to
// shorten the dependency chain: (s1+s2) and (s3+s4) can issue in parallel.
instruct tree_addL_addL_addL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2, iRegLsrc src3, iRegLsrc src4) %{
  match(Set dst (AddL (AddL (AddL src1 src2) src3) src4));
  ins_cost(DEFAULT_COST*3);

  expand %{
    // FIXME: we should do this in the ideal world.
    iRegLdst tmp1;
    iRegLdst tmp2;
    addL_reg_reg(tmp1, src1, src2);
    addL_reg_reg_2(tmp2, src3, src4); // Adlc complains about addL_reg_reg.
    addL_reg_reg(dst, tmp1, tmp2);
  %}
%}

// AddL + ConvL2I.
instruct addI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (ConvL2I (AddL src1 src2)));

  format %{ "ADD $dst, $src1, $src2 \t// long + l2i" %}
  size(4);
  ins_encode( enc_add(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// No constant pool entries required.
instruct addL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 con) %{
  match(Set dst (AddL src1 con));

  format %{ "ADDI $dst, $src1, $con" %}
  size(4);
  ins_encode( enc_addi(dst, src1, con) );
  ins_pipe(pipe_class_default);
%}

// Long Immediate Addition with 16-bit shifted operand.
// No constant pool entries required.
9368 instruct addL_reg_immhi16(iRegLdst dst, iRegLsrc src1, immL32hi16 src2) %{ 9369 match(Set dst (AddL src1 src2)); 9370 9371 format %{ "ADDIS $dst, $src1, $src2" %} 9372 size(4); 9373 ins_encode( enc_addis(dst, src1, src2) ); 9374 ins_pipe(pipe_class_default); 9375 %} 9376 9377 // Pointer Register Addition 9378 instruct addP_reg_reg(iRegPdst dst, iRegP_N2P src1, iRegLsrc src2) %{ 9379 match(Set dst (AddP src1 src2)); 9380 format %{ "ADD $dst, $src1, $src2" %} 9381 size(4); 9382 ins_encode( enc_add(dst, src1, src2) ); 9383 ins_pipe(pipe_class_default); 9384 %} 9385 9386 // Pointer Immediate Addition 9387 // No constant pool entries required. 9388 instruct addP_reg_imm16(iRegPdst dst, iRegP_N2P src1, immL16 src2) %{ 9389 match(Set dst (AddP src1 src2)); 9390 9391 format %{ "ADDI $dst, $src1, $src2" %} 9392 size(4); 9393 ins_encode( enc_addi(dst, src1, src2) ); 9394 ins_pipe(pipe_class_default); 9395 %} 9396 9397 // Pointer Immediate Addition with 16-bit shifted operand. 9398 // No constant pool entries required. 9399 instruct addP_reg_immhi16(iRegPdst dst, iRegP_N2P src1, immL32hi16 src2) %{ 9400 match(Set dst (AddP src1 src2)); 9401 9402 format %{ "ADDIS $dst, $src1, $src2" %} 9403 size(4); 9404 ins_encode( enc_addis(dst, src1, src2) ); 9405 ins_pipe(pipe_class_default); 9406 %} 9407 9408 //--------------------- 9409 // Subtraction Instructions 9410 9411 // Register Subtraction 9412 instruct subI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 9413 match(Set dst (SubI src1 src2)); 9414 format %{ "SUBF $dst, $src2, $src1" %} 9415 size(4); 9416 ins_encode( enc_subf(dst, src2, src1) ); 9417 ins_pipe(pipe_class_default); 9418 %} 9419 9420 // Immediate Subtraction 9421 // The compiler converts "x-c0" into "x+ -c0" (see SubINode::Ideal), 9422 // so this rule seems to be unused. 
9423 instruct subI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{ 9424 match(Set dst (SubI src1 src2)); 9425 format %{ "SUBI $dst, $src1, $src2" %} 9426 size(4); 9427 ins_encode( enc_addi_neg(dst, src1, src2) ); 9428 ins_pipe(pipe_class_default); 9429 %} 9430 9431 // SubI from constant (using subfic). 9432 instruct subI_imm16_reg(iRegIdst dst, immI16 src1, iRegIsrc src2) %{ 9433 match(Set dst (SubI src1 src2)); 9434 format %{ "SUBI $dst, $src1, $src2" %} 9435 9436 size(4); 9437 ins_encode( enc_subfic(dst, src2, src1) ); 9438 ins_pipe(pipe_class_default); 9439 %} 9440 9441 // Turn the sign-bit of an integer into a 32-bit mask, 0x0...0 for 9442 // positive integers and 0xF...F for negative ones. 9443 instruct signmask32I_regI(iRegIdst dst, iRegIsrc src) %{ 9444 // no match-rule, false predicate 9445 effect(DEF dst, USE src); 9446 predicate(false); 9447 9448 format %{ "SRAWI $dst, $src, #31" %} 9449 size(4); 9450 ins_encode( enc_srawi(dst, src, 0x1f) ); 9451 ins_pipe(pipe_class_default); 9452 %} 9453 9454 instruct absI_reg_Ex(iRegIdst dst, iRegIsrc src) %{ 9455 match(Set dst (AbsI src)); 9456 ins_cost(DEFAULT_COST*3); 9457 9458 expand %{ 9459 iRegIdst tmp1; 9460 iRegIdst tmp2; 9461 signmask32I_regI(tmp1, src); 9462 xorI_reg_reg(tmp2, tmp1, src); 9463 subI_reg_reg(dst, tmp2, tmp1); 9464 %} 9465 %} 9466 9467 instruct negI_regI(iRegIdst dst, immI_0 zero, iRegIsrc src2) %{ 9468 match(Set dst (SubI zero src2)); 9469 format %{ "NEG $dst, $src2" %} 9470 size(4); 9471 ins_encode( enc_neg(dst, src2) ); 9472 ins_pipe(pipe_class_default); 9473 %} 9474 9475 // Long subtraction 9476 instruct subL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{ 9477 match(Set dst (SubL src1 src2)); 9478 format %{ "SUBF $dst, $src2, $src1 \t// long" %} 9479 size(4); 9480 ins_encode( enc_subf(dst, src2, src1) ); 9481 ins_pipe(pipe_class_default); 9482 %} 9483 9484 // SubL + convL2I. 
9485 instruct subI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{ 9486 match(Set dst (ConvL2I (SubL src1 src2))); 9487 9488 format %{ "SUBF $dst, $src2, $src1 \t// long + l2i" %} 9489 size(4); 9490 ins_encode( enc_subf(dst, src2, src1) ); 9491 ins_pipe(pipe_class_default); 9492 %} 9493 9494 // Immediate Subtraction 9495 // The compiler converts "x-c0" into "x+ -c0" (see SubLNode::Ideal), 9496 // so this rule seems to be unused. 9497 // No constant pool entries required. 9498 instruct subL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{ 9499 match(Set dst (SubL src1 src2)); 9500 9501 format %{ "SUBI $dst, $src1, $src2 \t// long" %} 9502 size(4); 9503 ins_encode( enc_addi_neg(dst, src1, src2) ); 9504 ins_pipe(pipe_class_default); 9505 %} 9506 9507 // Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for 9508 // positive longs and 0xF...F for negative ones. 9509 instruct signmask64I_regL(iRegIdst dst, iRegLsrc src) %{ 9510 // no match-rule, false predicate 9511 effect(DEF dst, USE src); 9512 predicate(false); 9513 9514 format %{ "SRADI $dst, $src, #63" %} 9515 size(4); 9516 ins_encode %{ 9517 // TODO: PPC port $archOpcode(ppc64Opcode_sradi); 9518 __ sradi($dst$$Register, $src$$Register, 0x3f); 9519 %} 9520 ins_pipe(pipe_class_default); 9521 %} 9522 9523 // Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for 9524 // positive longs and 0xF...F for negative ones. 
9525 instruct signmask64L_regL(iRegLdst dst, iRegLsrc src) %{ 9526 // no match-rule, false predicate 9527 effect(DEF dst, USE src); 9528 predicate(false); 9529 9530 format %{ "SRADI $dst, $src, #63" %} 9531 size(4); 9532 ins_encode( enc_sradi(dst, src, 0x3f) ); 9533 ins_pipe(pipe_class_default); 9534 %} 9535 9536 // Long negation 9537 instruct negL_reg_reg(iRegLdst dst, immL_0 zero, iRegLsrc src2) %{ 9538 match(Set dst (SubL zero src2)); 9539 format %{ "NEG $dst, $src2 \t// long" %} 9540 size(4); 9541 ins_encode( enc_neg(dst, src2) ); 9542 ins_pipe(pipe_class_default); 9543 %} 9544 9545 // NegL + ConvL2I. 9546 instruct negI_con0_regL(iRegIdst dst, immL_0 zero, iRegLsrc src2) %{ 9547 match(Set dst (ConvL2I (SubL zero src2))); 9548 9549 format %{ "NEG $dst, $src2 \t// long + l2i" %} 9550 size(4); 9551 ins_encode( enc_neg(dst, src2) ); 9552 ins_pipe(pipe_class_default); 9553 %} 9554 9555 // Multiplication Instructions 9556 // Integer Multiplication 9557 9558 // Register Multiplication 9559 instruct mulI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 9560 match(Set dst (MulI src1 src2)); 9561 ins_cost(DEFAULT_COST); 9562 9563 format %{ "MULLW $dst, $src1, $src2" %} 9564 size(4); 9565 ins_encode( enc_mullw(dst, src1, src2) ); 9566 ins_pipe(pipe_class_default); 9567 %} 9568 9569 // Immediate Multiplication 9570 instruct mulI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{ 9571 match(Set dst (MulI src1 src2)); 9572 ins_cost(DEFAULT_COST); 9573 9574 format %{ "MULLI $dst, $src1, $src2" %} 9575 size(4); 9576 ins_encode( enc_mulli(dst, src1, src2) ); 9577 ins_pipe(pipe_class_default); 9578 %} 9579 9580 instruct mulL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{ 9581 match(Set dst (MulL src1 src2)); 9582 ins_cost(DEFAULT_COST); 9583 9584 format %{ "MULLD $dst $src1, $src2 \t// long" %} 9585 size(4); 9586 ins_encode( enc_mulld(dst, src1, src2) ); 9587 ins_pipe(pipe_class_default); 9588 %} 9589 9590 // Multiply high for optimized long division by 
constant. 9591 instruct mulHighL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{ 9592 match(Set dst (MulHiL src1 src2)); 9593 ins_cost(DEFAULT_COST); 9594 9595 format %{ "MULHD $dst $src1, $src2 \t// long" %} 9596 size(4); 9597 ins_encode( enc_mulhd(dst, src1, src2) ); 9598 ins_pipe(pipe_class_default); 9599 %} 9600 9601 // Immediate Multiplication 9602 instruct mulL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{ 9603 match(Set dst (MulL src1 src2)); 9604 ins_cost(DEFAULT_COST); 9605 9606 format %{ "MULLI $dst, $src1, $src2" %} 9607 size(4); 9608 ins_encode( enc_mulli(dst, src1, src2) ); 9609 ins_pipe(pipe_class_default); 9610 %} 9611 9612 // Integer Division with Immediate -1: Negate. 9613 instruct divI_reg_immIvalueMinus1(iRegIdst dst, iRegIsrc src1, immI_minus1 src2) %{ 9614 match(Set dst (DivI src1 src2)); 9615 ins_cost(DEFAULT_COST); 9616 9617 format %{ "NEG $dst, $src1 \t// /-1" %} 9618 size(4); 9619 ins_encode( enc_neg(dst, src1) ); 9620 ins_pipe(pipe_class_default); 9621 %} 9622 9623 // Integer Division with constant, but not -1. 9624 // We should be able to improve this by checking the type of src2. 9625 // It might well be that src2 is known to be positive. 9626 instruct divI_reg_regnotMinus1(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 9627 match(Set dst (DivI src1 src2)); 9628 predicate(n->in(2)->find_int_con(-1) != -1); // src2 is a constant, but not -1 9629 ins_cost(2*DEFAULT_COST); 9630 9631 format %{ "DIVW $dst, $src1, $src2 \t// /not-1" %} 9632 size(4); 9633 ins_encode( enc_divw(dst, src1, src2) ); 9634 ins_pipe(pipe_class_default); 9635 %} 9636 9637 instruct cmovI_bne_negI_reg(iRegIdst dst, flagsReg crx, iRegIsrc src1) %{ 9638 effect(USE_DEF dst, USE src1, USE crx); 9639 predicate(false); 9640 9641 ins_variable_size_depending_on_alignment(true); 9642 9643 format %{ "CMOVE $dst, neg($src1), $crx" %} 9644 // Worst case is branch + move + stop, no stop without scheduler. 
9645 size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8); 9646 ins_encode( enc_cmove_bne_neg(dst, crx, src1) ); 9647 ins_pipe(pipe_class_default); 9648 %} 9649 9650 // Integer Division with Registers not containing constants. 9651 instruct divI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 9652 match(Set dst (DivI src1 src2)); 9653 ins_cost(10*DEFAULT_COST); 9654 9655 expand %{ 9656 immI16 imm %{ (int)-1 %} 9657 flagsReg tmp1; 9658 cmpI_reg_imm16(tmp1, src2, imm); // check src2 == -1 9659 divI_reg_regnotMinus1(dst, src1, src2); // dst = src1 / src2 9660 cmovI_bne_negI_reg(dst, tmp1, src1); // cmove dst = neg(src1) if src2 == -1 9661 %} 9662 %} 9663 9664 // Long Division with Immediate -1: Negate. 9665 instruct divL_reg_immLvalueMinus1(iRegLdst dst, iRegLsrc src1, immL_minus1 src2) %{ 9666 match(Set dst (DivL src1 src2)); 9667 ins_cost(DEFAULT_COST); 9668 9669 format %{ "NEG $dst, $src1 \t// /-1, long" %} 9670 size(4); 9671 ins_encode( enc_neg(dst, src1) ); 9672 ins_pipe(pipe_class_default); 9673 %} 9674 9675 // Long Division with constant, but not -1. 9676 instruct divL_reg_regnotMinus1(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{ 9677 match(Set dst (DivL src1 src2)); 9678 predicate(n->in(2)->find_long_con(-1L) != -1L); // Src2 is a constant, but not -1. 9679 ins_cost(2*DEFAULT_COST); 9680 9681 format %{ "DIVD $dst, $src1, $src2 \t// /not-1, long" %} 9682 size(4); 9683 ins_encode( enc_divd(dst, src1, src2) ); 9684 ins_pipe(pipe_class_default); 9685 %} 9686 9687 instruct cmovL_bne_negL_reg(iRegLdst dst, flagsReg crx, iRegLsrc src1) %{ 9688 effect(USE_DEF dst, USE src1, USE crx); 9689 predicate(false); 9690 9691 ins_variable_size_depending_on_alignment(true); 9692 9693 format %{ "CMOVE $dst, neg($src1), $crx" %} 9694 // Worst case is branch + move + stop, no stop without scheduler. 9695 size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 
12 : 8); 9696 ins_encode( enc_cmove_bne_neg(dst, crx, src1) ); 9697 ins_pipe(pipe_class_default); 9698 %} 9699 9700 // Long Division with Registers not containing constants. 9701 instruct divL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{ 9702 match(Set dst (DivL src1 src2)); 9703 ins_cost(10*DEFAULT_COST); 9704 9705 expand %{ 9706 immL16 imm %{ (int)-1 %} 9707 flagsReg tmp1; 9708 cmpL_reg_imm16(tmp1, src2, imm); // check src2 == -1 9709 divL_reg_regnotMinus1(dst, src1, src2); // dst = src1 / src2 9710 cmovL_bne_negL_reg(dst, tmp1, src1); // cmove dst = neg(src1) if src2 == -1 9711 %} 9712 %} 9713 9714 // Integer Remainder with registers. 9715 instruct modI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 9716 match(Set dst (ModI src1 src2)); 9717 ins_cost(10*DEFAULT_COST); 9718 9719 expand %{ 9720 immI16 imm %{ (int)-1 %} 9721 flagsReg tmp1; 9722 iRegIdst tmp2; 9723 iRegIdst tmp3; 9724 cmpI_reg_imm16(tmp1, src2, imm); // check src2 == -1 9725 divI_reg_regnotMinus1(tmp2, src1, src2); // tmp2 = src1 / src2 9726 cmovI_bne_negI_reg(tmp2, tmp1, src1); // cmove tmp2 = neg(src1) if src2 == -1 9727 mulI_reg_reg(tmp3, src2, tmp2); // tmp3 = src2 * tmp2 9728 subI_reg_reg(dst, src1, tmp3); // dst = src1 - tmp3 9729 %} 9730 %} 9731 9732 // Long Remainder with registers 9733 instruct modL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{ 9734 match(Set dst (ModL src1 src2)); 9735 ins_cost(10*DEFAULT_COST); 9736 9737 expand %{ 9738 immL16 imm %{ (int)-1 %} 9739 flagsReg tmp1; 9740 iRegLdst tmp2; 9741 iRegLdst tmp3; 9742 cmpL_reg_imm16(tmp1, src2, imm); // check src2 == -1 9743 divL_reg_regnotMinus1(tmp2, src1, src2); // tmp2 = src1 / src2 9744 cmovL_bne_negL_reg(tmp2, tmp1, src1); // cmove tmp2 = neg(src1) if src2 == -1 9745 mulL_reg_reg(tmp3, src2, tmp2); // tmp3 = src2 * tmp2 9746 subL_reg_reg(dst, src1, tmp3); // dst = src1 - tmp3 9747 %} 9748 %} 9749 9750 // Integer Shift Instructions 9751 9752 // Register Shift Left 9753 9754 
// Clear all but the lowest #mask bits.
// Used to normalize shift amounts in registers: the Java spec only uses
// the low 5 (int) or 6 (long) bits of a variable shift count.
instruct maskI_reg_imm(iRegIdst dst, iRegIsrc src, uimmI6 mask) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src, USE mask);
  predicate(false);

  format %{ "MASK $dst, $src, $mask \t// clear $mask upper bits" %}
  size(4);
  ins_encode( enc_clrldi(dst, src, mask) );
  ins_pipe(pipe_class_default);
%}

// Raw SLW; only reachable via the _Ex expand below (no match rule).
instruct lShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "SLW $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_slw(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Variable int shift left: mask the count to 5 bits first, then SLW.
instruct lShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (LShiftI src1 src2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
    iRegIdst tmpI;
    maskI_reg_imm(tmpI, src2, mask);
    lShiftI_reg_reg(dst, src1, tmpI);
  %}
%}

// Register Shift Left Immediate
instruct lShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  format %{ "SLWI $dst, $src1, ($src2 & 0x1f)" %}
  size(4);
  ins_encode( enc_slwi(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// AndI with negpow2-constant + LShiftI:
// fold mask-then-shift into a single rotate-and-mask (RLWINM).
instruct lShiftI_andI_immInegpow2_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, uimmI5 src3) %{
  match(Set dst (LShiftI (AndI src1 src2) src3));
  predicate(UseRotateAndMaskInstructionsPPC64);

  format %{ "RLWINM $dst, lShiftI(AndI($src1, $src2), $src3)" %}
  size(4);
  ins_encode( enc_lShiftI_andI_immInegpow2_imm5(dst, src1, src2, src3) );
  ins_pipe(pipe_class_default);
%}

// RShiftI + AndI with negpow2-constant + LShiftI:
// note the shift counts of the inner RShiftI and outer LShiftI must be
// the same node (src3) for the pattern to match.
instruct lShiftI_andI_immInegpow2_rShiftI_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, uimmI5 src3) %{
  match(Set dst (LShiftI (AndI (RShiftI src1 src3) src2) src3));
  predicate(UseRotateAndMaskInstructionsPPC64);

  format %{ "RLWINM $dst, lShiftI(AndI(RShiftI($src1, $src3), $src2), $src3)" %}
  size(4);
  ins_encode( enc_lShiftI_andI_immInegpow2_rShiftI_imm5(dst, src1, src2, src3) );
  ins_pipe(pipe_class_default);
%}

// Raw SLD; only reachable via the _Ex expand below (no match rule).
instruct lShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "SLD $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_sld(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Register Shift Left
// Variable long shift left: mask the count to 6 bits first, then SLD.
instruct lShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
  match(Set dst (LShiftL src1 src2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
    iRegIdst tmpI;
    maskI_reg_imm(tmpI, src2, mask);
    lShiftL_regL_regI(dst, src1, tmpI);
  %}
%}

// Register Shift Left Immediate
instruct lshiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));
  format %{ "SLDI $dst, $src1, ($src2 & 0x3f)" %}
  size(4);
  ins_encode( enc_sldi(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// If we shift more than 32 bits, we need not convert I2L.
// (The shift pushes all 32 significant bits past bit 31 anyway.)
instruct lShiftL_regI_immGE32(iRegLdst dst, iRegIsrc src1, uimmI6_ge32 src2) %{
  match(Set dst (LShiftL (ConvI2L src1) src2));
  ins_cost(DEFAULT_COST);

  size(4);
  format %{ "SLDI $dst, i2l($src1), $src2" %}
  ins_encode( enc_sldi(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Shift a positive int to the left.
// Clrlsldi clears the upper 32 bits and shifts.
9866 instruct scaledPositiveI2L_lShiftL_convI2L_reg_imm6(iRegLdst dst, iRegIsrc src1, uimmI6 src2) %{ 9867 match(Set dst (LShiftL (ConvI2L src1) src2)); 9868 predicate(((ConvI2LNode*)(_kids[0]->_leaf))->type()->is_long()->is_positive_int()); 9869 9870 format %{ "SLDI $dst, i2l(positive_int($src1)), $src2" %} 9871 size(4); 9872 ins_encode( enc_clrlsldi(dst, src1, 0x20, src2) ); 9873 ins_pipe(pipe_class_default); 9874 %} 9875 9876 instruct arShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 9877 // no match-rule, false predicate 9878 effect(DEF dst, USE src1, USE src2); 9879 predicate(false); 9880 9881 format %{ "SRAW $dst, $src1, $src2" %} 9882 size(4); 9883 ins_encode( enc_sraw(dst, src1, src2) ); 9884 ins_pipe(pipe_class_default); 9885 %} 9886 9887 // Register Arithmetic Shift Right 9888 instruct arShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 9889 match(Set dst (RShiftI src1 src2)); 9890 ins_cost(DEFAULT_COST*2); 9891 expand %{ 9892 uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %} 9893 iRegIdst tmpI; 9894 maskI_reg_imm(tmpI, src2, mask); 9895 arShiftI_reg_reg(dst, src1, tmpI); 9896 %} 9897 %} 9898 9899 // Register Arithmetic Shift Right Immediate 9900 instruct arShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{ 9901 match(Set dst (RShiftI src1 src2)); 9902 9903 format %{ "SRAWI $dst, $src1, ($src2 & 0x1f)" %} 9904 size(4); 9905 ins_encode( enc_srawi(dst, src1, src2) ); 9906 ins_pipe(pipe_class_default); 9907 %} 9908 9909 instruct arShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{ 9910 // no match-rule, false predicate 9911 effect(DEF dst, USE src1, USE src2); 9912 predicate(false); 9913 9914 format %{ "SRAD $dst, $src1, $src2" %} 9915 size(4); 9916 ins_encode( enc_srad(dst, src1, src2) ); 9917 ins_pipe(pipe_class_default); 9918 %} 9919 9920 // Register Shift Right Arithmetic Long 9921 instruct arShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{ 9922 match(Set dst (RShiftL src1 src2)); 9923 
ins_cost(DEFAULT_COST*2); 9924 9925 expand %{ 9926 uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %} 9927 iRegIdst tmpI; 9928 maskI_reg_imm(tmpI, src2, mask); 9929 arShiftL_regL_regI(dst, src1, tmpI); 9930 %} 9931 %} 9932 9933 // Register Shift Right Immediate 9934 instruct arShiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{ 9935 match(Set dst (RShiftL src1 src2)); 9936 9937 format %{ "SRADI $dst, $src1, ($src2 & 0x3f)" %} 9938 size(4); 9939 ins_encode( enc_sradi(dst, src1, src2) ); 9940 ins_pipe(pipe_class_default); 9941 %} 9942 9943 // RShiftL + ConvL2I 9944 instruct convL2I_arShiftL_regL_immI(iRegIdst dst, iRegLsrc src1, immI src2) %{ 9945 match(Set dst (ConvL2I (RShiftL src1 src2))); 9946 9947 format %{ "SRADI $dst, $src1, ($src2 & 0x3f) \t// long + l2i" %} 9948 size(4); 9949 ins_encode( enc_sradi(dst, src1, src2) ); 9950 ins_pipe(pipe_class_default); 9951 %} 9952 9953 instruct urShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 9954 // no match-rule, false predicate 9955 effect(DEF dst, USE src1, USE src2); 9956 predicate(false); 9957 9958 format %{ "SRW $dst, $src1, $src2" %} 9959 size(4); 9960 ins_encode( enc_srw(dst, src1, src2) ); 9961 ins_pipe(pipe_class_default); 9962 %} 9963 9964 // Register Shift Right 9965 instruct urShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{ 9966 match(Set dst (URShiftI src1 src2)); 9967 ins_cost(DEFAULT_COST*2); 9968 9969 expand %{ 9970 uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %} 9971 iRegIdst tmpI; 9972 maskI_reg_imm(tmpI, src2, mask); 9973 urShiftI_reg_reg(dst, src1, tmpI); 9974 %} 9975 %} 9976 9977 // Register Shift Right Immediate 9978 instruct urShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{ 9979 match(Set dst (URShiftI src1 src2)); 9980 9981 format %{ "SRWI $dst, $src1, ($src2 & 0x1f)" %} 9982 size(4); 9983 ins_encode( enc_srwi(dst, src1, src2) ); 9984 ins_pipe(pipe_class_default); 9985 %} 9986 9987 instruct urShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc 
src2) %{ 9988 // no match-rule, false predicate 9989 effect(DEF dst, USE src1, USE src2); 9990 predicate(false); 9991 9992 format %{ "SRD $dst, $src1, $src2" %} 9993 size(4); 9994 ins_encode( enc_srd(dst, src1, src2) ); 9995 ins_pipe(pipe_class_default); 9996 %} 9997 9998 // Register Shift Right 9999 instruct urShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{ 10000 match(Set dst (URShiftL src1 src2)); 10001 ins_cost(DEFAULT_COST*2); 10002 10003 expand %{ 10004 uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %} 10005 iRegIdst tmpI; 10006 maskI_reg_imm(tmpI, src2, mask); 10007 urShiftL_regL_regI(dst, src1, tmpI); 10008 %} 10009 %} 10010 10011 // Register Shift Right Immediate 10012 instruct urShiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{ 10013 match(Set dst (URShiftL src1 src2)); 10014 10015 format %{ "SRDI $dst, $src1, ($src2 & 0x3f)" %} 10016 size(4); 10017 ins_encode( enc_srdi(dst, src1, src2) ); 10018 ins_pipe(pipe_class_default); 10019 %} 10020 10021 // URShiftL + ConvL2I. 
// URShiftL folded with ConvL2I: a single SRDI leaves the low 32 bits
// already in int position, so no separate narrowing is needed.
instruct convL2I_urShiftL_regL_immI(iRegIdst dst, iRegLsrc src1, immI src2) %{
  match(Set dst (ConvL2I (URShiftL src1 src2)));

  format %{ "SRDI $dst, $src1, ($src2 & 0x3f) \t// long + l2i" %}
  size(4);
  ins_encode( enc_srdi(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Register Shift Right Immediate with a CastP2X
instruct shrP_convP2X_reg_imm6(iRegLdst dst, iRegP_N2P src1, uimmI6 src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  format %{ "SRDI $dst, $src1, $src2 \t// Cast ptr $src1 to long and shift" %}
  size(4);
  ins_encode( enc_srdi(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// ConvL2I(ConvI2L(x)) is a sign-extension of the low word: use EXTSW.
instruct sxtI_reg(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (ConvL2I (ConvI2L src)));

  format %{ "EXTSW $dst, $src \t// int->int" %}
  size(4);
  ins_encode( enc_extswII(dst, src) );
  ins_pipe(pipe_class_default);
%}

//----------Rotate Instructions------------------------------------------------

// Rotate Left by 8-bit immediate
// Matches (x << l) | (x >>> r); predicate requires (l + r) % 32 == 0,
// i.e. the two shifts really form one 32-bit rotation.
instruct rotlI_reg_immi8(iRegIdst dst, iRegIsrc src, immI8 lshift, immI8 rshift) %{
  match(Set dst (OrI (LShiftI src lshift) (URShiftI src rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));

  format %{ "ROTLWI $dst, $src, $lshift" %}
  size(4);
  ins_encode( enc_rotlwi(dst, src, lshift) );
  ins_pipe(pipe_class_default);
%}

// Rotate Right by 8-bit immediate
// Same rotation identity with the URShift as the left operand of the Or.
instruct rotrI_reg_immi8(iRegIdst dst, iRegIsrc src, immI8 rshift, immI8 lshift) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI src lshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));

  // Fixed: format previously omitted the $src operand ("ROTRWI $dst, $rshift"),
  // producing a malformed disassembly line; rotlI above prints dst, src, shift.
  format %{ "ROTRWI $dst, $src, $rshift" %}
  size(4);
  ins_encode( enc_rotrwi(dst, src, rshift) );
  ins_pipe(pipe_class_default);
%}

10074 //----------Floating Point Arithmetic Instructions----------------------------- 10075 10076 // Add float single precision 10077 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{ 10078 match(Set dst (AddF src1 src2)); 10079 10080 format %{ "FADDS $dst, $src1, $src2" %} 10081 size(4); 10082 ins_encode( enc_fadds(dst, src1, src2) ); 10083 ins_pipe(pipe_class_default); 10084 %} 10085 10086 // Add float double precision 10087 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{ 10088 match(Set dst (AddD src1 src2)); 10089 10090 format %{ "FADD $dst, $src1, $src2" %} 10091 size(4); 10092 ins_encode( enc_fadd(dst, src1, src2) ); 10093 ins_pipe(pipe_class_default); 10094 %} 10095 10096 // Sub float single precision 10097 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{ 10098 match(Set dst (SubF src1 src2)); 10099 10100 format %{ "FSUBS $dst, $src1, $src2" %} 10101 size(4); 10102 ins_encode( enc_fsubs(dst, src1, src2) ); 10103 ins_pipe(pipe_class_default); 10104 %} 10105 10106 // Sub float double precision 10107 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{ 10108 match(Set dst (SubD src1 src2)); 10109 format %{ "FSUB $dst, $src1, $src2" %} 10110 size(4); 10111 ins_encode( enc_fsub(dst, src1, src2) ); 10112 ins_pipe(pipe_class_default); 10113 %} 10114 10115 // Mul float single precision 10116 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{ 10117 match(Set dst (MulF src1 src2)); 10118 format %{ "FMULS $dst, $src1, $src2" %} 10119 size(4); 10120 ins_encode( enc_fmuls(dst, src1, src2) ); 10121 ins_pipe(pipe_class_default); 10122 %} 10123 10124 // Mul float double precision 10125 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{ 10126 match(Set dst (MulD src1 src2)); 10127 format %{ "FMUL $dst, $src1, $src2" %} 10128 size(4); 10129 ins_encode( enc_fmul(dst, src1, src2) ); 10130 ins_pipe(pipe_class_default); 10131 %} 10132 10133 // Div float single precision 10134 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{ 10135 
match(Set dst (DivF src1 src2)); 10136 format %{ "FDIVS $dst, $src1, $src2" %} 10137 size(4); 10138 ins_encode( enc_fdivs(dst, src1, src2) ); 10139 ins_pipe(pipe_class_default); 10140 %} 10141 10142 // Div float double precision 10143 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{ 10144 match(Set dst (DivD src1 src2)); 10145 format %{ "FDIV $dst, $src1, $src2" %} 10146 size(4); 10147 ins_encode( enc_fdiv(dst, src1, src2) ); 10148 ins_pipe(pipe_class_default); 10149 %} 10150 10151 // Absolute float single precision 10152 instruct absF_reg(regF dst, regF src) %{ 10153 match(Set dst (AbsF src)); 10154 format %{ "FABS $dst, $src \t// float" %} 10155 size(4); 10156 ins_encode( enc_fabs(dst, src) ); 10157 ins_pipe(pipe_class_default); 10158 %} 10159 10160 // Absolute float double precision 10161 instruct absD_reg(regD dst, regD src) %{ 10162 match(Set dst (AbsD src)); 10163 format %{ "FABS $dst, $src \t// double" %} 10164 size(4); 10165 ins_encode( enc_fabs(dst, src) ); 10166 ins_pipe(pipe_class_default); 10167 %} 10168 10169 instruct negF_reg(regF dst, regF src) %{ 10170 match(Set dst (NegF src)); 10171 format %{ "FNEG $dst, $src \t// float" %} 10172 size(4); 10173 ins_encode( enc_fneg(dst, src) ); 10174 ins_pipe(pipe_class_default); 10175 %} 10176 10177 instruct negD_reg(regD dst, regD src) %{ 10178 match(Set dst (NegD src)); 10179 format %{ "FNEG $dst, $src \t// double" %} 10180 size(4); 10181 ins_encode( enc_fneg(dst, src) ); 10182 ins_pipe(pipe_class_default); 10183 %} 10184 10185 // AbsF + NegF. 10186 instruct negF_absF_reg(regF dst, regF src) %{ 10187 match(Set dst (NegF (AbsF src))); 10188 format %{ "FNABS $dst, $src \t// float" %} 10189 size(4); 10190 ins_encode( enc_fnabs(dst, src) ); 10191 ins_pipe(pipe_class_default); 10192 %} 10193 10194 // AbsD + NegD. 
// AbsD + NegD fused into a single negative-absolute instruction.
instruct negD_absD_reg(regD dst, regD src) %{
  match(Set dst (NegD (AbsD src)));
  format %{ "FNABS $dst, $src \t// double" %}
  size(4);
  ins_encode( enc_fnabs(dst, src) );
  ins_pipe(pipe_class_default);
%}

// VM_Version::has_fsqrt() decides if this node will be used.
// Sqrt float double precision
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  format %{ "FSQRT $dst, $src" %}
  size(4);
  ins_encode( enc_fsqrt(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Single-precision sqrt. Matches the ideal ConvD2F(SqrtD(ConvF2D)) pattern
// so a float sqrt does not round-trip through double precision.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  predicate(VM_Version::has_fsqrts());
  ins_cost(DEFAULT_COST);

  format %{ "FSQRTS $dst, $src" %}
  size(4);
  ins_encode( enc_fsqrts(dst, src) );
  ins_pipe(pipe_class_default);
%}

// RoundDouble is a no-op on PPC: emits nothing (size 0).
instruct roundDouble_nop(regD dst) %{
  match(Set dst (RoundDouble dst));
  ins_cost(0);

  format %{ " -- \t// RoundDouble not needed - empty" %}
  size(0);
  // PPC results are already "rounded" (i.e., normal-format IEEE).
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

// RoundFloat is a no-op on PPC: emits nothing (size 0).
instruct roundFloat_nop(regF dst) %{
  match(Set dst (RoundFloat dst));
  ins_cost(0);

  format %{ " -- \t// RoundFloat not needed - empty" %}
  size(0);
  // PPC results are already "rounded" (i.e., normal-format IEEE).
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

//----------Logical Instructions-----------------------------------------------

// And Instructions

// Register And
instruct andI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (AndI src1 src2));
  format %{ "AND $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_and(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate And. The andi. form sets CR0 as a side effect, hence KILL cr0.
instruct andI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2, flagsRegCR0 cr0) %{
  match(Set dst (AndI src1 src2));
  effect(KILL cr0);

  format %{ "ANDI $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_andi(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate And where the immediate is a negative power of 2.
instruct andI_reg_immInegpow2(iRegIdst dst, iRegIsrc src1, immInegpow2 src2) %{
  match(Set dst (AndI src1 src2));
  format %{ "ANDWI $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_andI_reg_immInegpow2(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate And where the immediate is a power of 2 minus 1 (low-bit mask).
instruct andI_reg_immIpow2minus1(iRegIdst dst, iRegIsrc src1, immIpow2minus1 src2) %{
  match(Set dst (AndI src1 src2));
  format %{ "ANDWI $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_and_reg_immpow2minus1(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate And where the immediate is a single-bit mask (power of 2);
// only used when rotate-and-mask instructions are enabled.
instruct andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src1, immIpowerOf2 src2) %{
  match(Set dst (AndI src1 src2));
  predicate(UseRotateAndMaskInstructionsPPC64);
  format %{ "ANDWI $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_andI_reg_immIpowerOf2(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Register And Long
instruct andL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "AND $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode( enc_and(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate And long. The andi. form sets CR0 as a side effect, hence KILL cr0.
instruct andL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 src2, flagsRegCR0 cr0) %{
  match(Set dst (AndL src1 src2));
  effect(KILL cr0);
  ins_cost(DEFAULT_COST);

  format %{ "ANDI $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode( enc_andi(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate And Long where the immediate is a negative power of 2.
instruct andL_reg_immLnegpow2(iRegLdst dst, iRegLsrc src1, immLnegpow2 src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "ANDDI $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_andL_reg_immLnegpow2(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate And Long where the immediate is a power of 2 minus 1 (low-bit mask).
instruct andL_reg_immLpow2minus1(iRegLdst dst, iRegLsrc src1, immLpow2minus1 src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "ANDDI $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_and_reg_immpow2minus1(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// AndL + ConvL2I.
// AndL + ConvL2I folded into one 64-bit mask instruction.
instruct convL2I_andL_reg_immLpow2minus1(iRegIdst dst, iRegLsrc src1, immLpow2minus1 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(DEFAULT_COST);

  format %{ "ANDDI $dst, $src1, $src2 \t// long + l2i" %}
  size(4);
  ins_encode( enc_and_reg_immpow2minus1(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Or Instructions

// Register Or
instruct orI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (OrI src1 src2));
  format %{ "OR $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_or(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Expand does not work with above instruct. (??)
// Match-rule-free twin of orI_reg_reg, used only inside expand %{%} blocks.
instruct orI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule
  effect(DEF dst, USE src1, USE src2);
  format %{ "OR $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_or(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Four-way Or tree, split into a balanced pair of Ors plus a combining Or
// so the two inner Ors can issue independently.
instruct tree_orI_orI_orI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
  match(Set dst (OrI (OrI (OrI src1 src2) src3) src4));
  ins_cost(DEFAULT_COST*3);

  expand %{
    // FIXME: we should do this in the ideal world.
    iRegIdst tmp1;
    iRegIdst tmp2;
    orI_reg_reg(tmp1, src1, src2);
    orI_reg_reg_2(tmp2, src3, src4); // Adlc complains about orI_reg_reg.
    orI_reg_reg(dst, tmp1, tmp2);
  %}
%}

// Immediate Or
instruct orI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
  match(Set dst (OrI src1 src2));
  format %{ "ORI $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_ori(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Register Or Long
instruct orL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (OrL src1 src2));
  ins_cost(DEFAULT_COST);

  size(4);
  format %{ "OR $dst, $src1, $src2 \t// long" %}
  ins_encode( enc_or(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// OrL + ConvL2I.
instruct orI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (ConvL2I (OrL src1 src2)));
  ins_cost(DEFAULT_COST);

  format %{ "OR $dst, $src1, $src2 \t// long + l2i" %}
  size(4);
  ins_encode( enc_or(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate Or long
instruct orL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 con) %{
  match(Set dst (OrL src1 con));
  ins_cost(DEFAULT_COST);

  format %{ "ORI $dst, $src1, $con \t// long" %}
  size(4);
  ins_encode( enc_ori(dst, src1, con) );
  ins_pipe(pipe_class_default);
%}

// Xor Instructions

// Register Xor
instruct xorI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (XorI src1 src2));
  format %{ "XOR $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_xor(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Expand does not work with above instruct. (??)
// Match-rule-free twin of xorI_reg_reg, used only inside expand %{%} blocks.
instruct xorI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule
  effect(DEF dst, USE src1, USE src2);
  format %{ "XOR $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_xor(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Four-way Xor tree, split into a balanced pair of Xors plus a combining Xor
// so the two inner Xors can issue independently.
instruct tree_xorI_xorI_xorI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
  match(Set dst (XorI (XorI (XorI src1 src2) src3) src4));
  ins_cost(DEFAULT_COST*3);

  expand %{
    // FIXME: we should do this in the ideal world.
    iRegIdst tmp1;
    iRegIdst tmp2;
    xorI_reg_reg(tmp1, src1, src2);
    xorI_reg_reg_2(tmp2, src3, src4); // Adlc complains about xorI_reg_reg.
    xorI_reg_reg(dst, tmp1, tmp2);
  %}
%}

// Immediate Xor
instruct xorI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
  match(Set dst (XorI src1 src2));
  format %{ "XORI $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_xori(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Register Xor Long
instruct xorL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "XOR $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode( enc_xor(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// XorL + ConvL2I.
// XorL + ConvL2I folded into one instruction.
instruct xorI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (ConvL2I (XorL src1 src2)));
  ins_cost(DEFAULT_COST);

  format %{ "XOR $dst, $src1, $src2 \t// long + l2i" %}
  size(4);
  ins_encode( enc_xor(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Immediate Xor Long
instruct xorL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 con) %{
  match(Set dst (XorL src1 con));
  ins_cost(DEFAULT_COST);

  format %{ "XORI $dst, $src1, $con \t// long" %}
  size(4);
  ins_encode( enc_xori(dst, src1, con) );
  ins_pipe(pipe_class_default);
%}

// Bitwise NOT: XorI with the constant -1. src2 only anchors the match rule;
// the encoding uses src1 alone.
instruct notI_reg(iRegIdst dst, iRegIsrc src1, immI_minus1 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "NOT $dst, $src1 ($src2)" %}
  size(4);
  ins_encode( enc_not(dst, src1) );
  ins_pipe(pipe_class_default);
%}

// Bitwise NOT, long variant: XorL with the constant -1.
instruct notL_reg(iRegLdst dst, iRegLsrc src1, immL_minus1 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "NOT $dst, $src1 ($src2) \t// long" %}
  size(4);
  ins_encode( enc_not(dst, src1) );
  ins_pipe(pipe_class_default);
%}

// And-complement: matches (src1 ^ -1) & src3.
// NOTE(review): operand order is swapped into the encoding — enc_andc takes
// (dst, src3, src1), i.e. src3 AND-with-complement-of src1; presumably andc's
// second operand is the uncomplemented one — confirm against enc_andc.
instruct andcI_reg_reg(iRegIdst dst, iRegIsrc src1, immI_minus1 src2, iRegIsrc src3) %{
  match(Set dst (AndI (XorI src1 src2) src3));
  ins_cost(DEFAULT_COST);

  format %{ "ANDW $dst, xori($src1, $src2), $src3" %}
  size(4);
  ins_encode( enc_andc(dst, src3, src1) );
  ins_pipe(pipe_class_default);
%}

// And-complement, long variant; only reachable via expand (predicate false).
instruct andcL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "ANDC $dst, $src1, $src2" %}
  size(4);
  ins_encode( enc_andc(dst, src1, src2) );
  ins_pipe(pipe_class_default);
%}

//----------Moves between int/long and float/double----------------------------
//
// The following rules move values from int/long registers/stack-locations
// to float/double registers/stack-locations and vice versa, without doing any
// conversions. These rules are used to implement the bit-conversion methods
// of java.lang.Float etc., e.g.
// int floatToIntBits(float value)
// float intBitsToFloat(int bits)
//
// Notes on the implementation on ppc64:
// We only provide rules which move between a register and a stack-location,
// because we always have to go through memory when moving between a float
// register and an integer register.

//---------- Chain stack slots between similar types --------

// These are needed so that the rules below can match.

// Load integer from stack slot
instruct stkI_to_regI(iRegIdst dst, stackSlotI src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $src" %}
  size(4);
  ins_encode( enc_lwz(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Store integer to stack slot
instruct regI_to_stkI(stackSlotI dst, iRegIsrc src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src, $dst \t// stk" %}
  size(4);
  ins_encode( enc_stw(src, dst) ); // rs=rt
  ins_pipe(pipe_class_memory);
%}

// Load long from stack slot
instruct stkL_to_regL(iRegLdst dst, stackSlotL src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $src \t// long" %}
  size(4);
  ins_encode( enc_ld(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Store long to stack slot
instruct regL_to_stkL(stackSlotL dst, iRegLsrc src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STD $src, $dst \t// long" %}
  size(4);
  ins_encode( enc_std(src, dst) ); // rs=rt
  ins_pipe(pipe_class_memory);
%}

//----------Moves between int and float

// Move float value from float stack-location to integer register.
instruct moveF2I_stack_reg(iRegIdst dst, stackSlotF src) %{
  match(Set dst (MoveF2I src));
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $src \t// MoveF2I" %}
  size(4);
  ins_encode( enc_lwz(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Move float value from float register to integer stack-location.
instruct moveF2I_reg_stack(stackSlotI dst, regF src) %{
  match(Set dst (MoveF2I src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STFS $src, $dst \t// MoveF2I" %}
  size(4);
  ins_encode( enc_stfs(src, dst) );
  ins_pipe(pipe_class_memory);
%}

// Move integer value from integer stack-location to float register.
instruct moveI2F_stack_reg(regF dst, stackSlotI src) %{
  match(Set dst (MoveI2F src));
  ins_cost(MEMORY_REF_COST);

  format %{ "LFS $dst, $src \t// MoveI2F" %}
  size(4);
  ins_encode( enc_lfs(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Move integer value from integer register to float stack-location.
// Move integer value from integer register to float stack-location.
instruct moveI2F_reg_stack(stackSlotF dst, iRegIsrc src) %{
  match(Set dst (MoveI2F src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src, $dst \t// MoveI2F" %}
  size(4);
  ins_encode( enc_stw(src, dst) );
  ins_pipe(pipe_class_memory);
%}

//----------Moves between long and float

// Store float register to a long stack slot; only reachable via expand
// (predicate false), used by the conv*2I/L chains below.
instruct moveF2L_reg_stack(stackSlotL dst, regF src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "storeD $src, $dst \t// STACK" %}
  size(4);
  ins_encode( enc_stfd(src, dst) );
  ins_pipe(pipe_class_default);
%}

//----------Moves between long and double

// Move double value from double stack-location to long register.
instruct moveD2L_stack_reg(iRegLdst dst, stackSlotD src) %{
  match(Set dst (MoveD2L src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LD $dst, $src \t// MoveD2L" %}
  ins_encode( enc_ld(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Move double value from double register to long stack-location.
instruct moveD2L_reg_stack(stackSlotL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STFD $src, $dst \t// MoveD2L" %}
  size(4);
  ins_encode( enc_stfd(src, dst) );
  ins_pipe(pipe_class_memory);
%}

// Move long value from long stack-location to double register.
instruct moveL2D_stack_reg(regD dst, stackSlotL src) %{
  match(Set dst (MoveL2D src));
  ins_cost(MEMORY_REF_COST);

  format %{ "LFD $dst, $src \t// MoveL2D" %}
  size(4);
  ins_encode( enc_lfd(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Move long value from long register to double stack-location.
// Move long value from long register to double stack-location.
instruct moveL2D_reg_stack(stackSlotD dst, iRegLsrc src) %{
  match(Set dst (MoveL2D src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STD $src, $dst \t// MoveL2D" %}
  size(4);
  ins_encode( enc_std(src, dst) );
  ins_pipe(pipe_class_memory);
%}

//----------Register Move Instructions-----------------------------------------

// Replicate for Superword

// Register-to-register copy; only reachable via expand (predicate false).
// Emits nothing if dst and src already coincide.
instruct moveReg(iRegLdst dst, iRegIsrc src) %{
  predicate(false);
  effect(DEF dst, USE src);

  format %{ "MR $dst, $src \t// replicate " %}
  // variable size, 0 or 4.
  ins_encode( enc_mr_if_needed(dst, src) );
  ins_pipe(pipe_class_default);
%}

//----------Cast instructions (Java-level type cast)---------------------------

// Cast Long to Pointer for unsafe natives.
instruct castX2P(iRegPdst dst, iRegLsrc src) %{
  match(Set dst (CastX2P src));

  format %{ "MR $dst, $src \t// Long->Ptr" %}
  // variable size, 0 or 4.
  ins_encode( enc_mr_if_needed(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Cast Pointer to Long for unsafe natives.
instruct castP2X(iRegLdst dst, iRegP_N2P src) %{
  match(Set dst (CastP2X src));

  format %{ "MR $dst, $src \t// Ptr->Long" %}
  // variable size, 0 or 4.
  ins_encode( enc_mr_if_needed(dst, src) );
  ins_pipe(pipe_class_default);
%}

// CastPP is a compile-time type narrowing only — no code emitted.
instruct castPP(iRegPdst dst) %{
  match(Set dst (CastPP dst));
  format %{ " -- \t// castPP of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

// CastII is a compile-time type narrowing only — no code emitted.
instruct castII(iRegIdst dst) %{
  match(Set dst (CastII dst));
  format %{ " -- \t// castII of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

// CheckCastPP is a compile-time type narrowing only — no code emitted.
instruct checkCastPP(iRegPdst dst) %{
  match(Set dst (CheckCastPP dst));
  format %{ " -- \t// checkcastPP of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

//----------Convert instructions-----------------------------------------------

// Convert to boolean.

// int_to_bool(src) : { 1 if src != 0
//                    { 0 else
//
// strategy:
// 1) Count leading zeros of 32 bit-value src,
//    this returns 32 (0b10.0000) iff src == 0 and <32 otherwise.
// 2) Shift 5 bits to the right, result is 0b1 iff src == 0, 0b0 otherwise.
// 3) Xori the result to get 0b1 if src != 0 and 0b0 if src == 0.
// convI2Bool — branch-free variant using count-leading-zeros (see strategy above).
instruct convI2Bool_reg__cntlz_Ex(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (Conv2B src));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI shiftAmount %{ 0x5 %}
    uimmI16 mask %{ 0x1 %}
    iRegIdst tmp1;
    iRegIdst tmp2;
    countLeadingZerosI(tmp1, src);
    urShiftI_reg_imm(tmp2, tmp1, shiftAmount);
    xorI_reg_uimm16(dst, tmp2, mask);
  %}
%}

// convI2Bool — compare-and-branch fallback when cntlzw is not available.
instruct convI2Bool_reg__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx) %{
  match(Set dst (Conv2B src));
  effect(TEMP crx);
  predicate(!UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CMPWI $crx, $src, #0 \t// convI2B"
            "LI $dst, #0\n\t"
            "BEQ $crx, done\n\t"
            "LI $dst, #1\n"
            "done:" %}
  size(16);
  ins_encode( enc_convI2B_regI__cmove(dst, src, crx, 0x0, 0x1) );
  ins_pipe(pipe_class_compare);
%}

// ConvI2B + XorI — the inverted boolean; the final Xori of the cntlz
// strategy cancels against the matched Xor, so only two instructions remain.
instruct xorI_convI2Bool_reg_immIvalue1__cntlz_Ex(iRegIdst dst, iRegIsrc src, immI_1 mask) %{
  match(Set dst (XorI (Conv2B src) mask));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI shiftAmount %{ 0x5 %}
    iRegIdst tmp1;
    countLeadingZerosI(tmp1, src);
    urShiftI_reg_imm(dst, tmp1, shiftAmount);
  %}
%}

// ConvI2B + XorI — compare-and-branch fallback (note the swapped 1/0 constants).
instruct xorI_convI2Bool_reg_immIvalue1__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx, immI_1 mask) %{
  match(Set dst (XorI (Conv2B src) mask));
  effect(TEMP crx);
  predicate(!UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CMPWI $crx, $src, #0 \t// Xor(convI2B($src), $mask)"
            "LI $dst, #1\n\t"
            "BEQ $crx, done\n\t"
            "LI $dst, #0\n"
            "done:" %}
  size(16);
  ins_encode( enc_convI2B_regI__cmove(dst, src, crx, 0x1, 0x0) );
  ins_pipe(pipe_class_compare);
%}

// AndI 0b0..010..0 + ConvI2B — single-bit test folded into one rotate-and-mask.
instruct convI2Bool_andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src, immIpowerOf2 mask) %{
  match(Set dst (Conv2B (AndI src mask)));
  predicate(UseRotateAndMaskInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "RLWINM $dst, $src, $mask \t// convI2B(AndI($src, $mask))" %}
  size(4);
  ins_encode( enc_convI2B_andI_reg_immIpowerOf2(dst, src, mask) );
  ins_pipe(pipe_class_default);
%}

// Convert pointer to boolean.
//
// ptr_to_bool(src) : { 1 if src != 0
//                    { 0 else
//
// strategy:
// 1) Count leading zeros of 64 bit-value src,
//    this returns 64 (0b100.0000) iff src == 0 and <64 otherwise.
// 2) Shift 6 bits to the right, result is 0b1 iff src == 0, 0b0 otherwise.
// 3) Xori the result to get 0b1 if src != 0 and 0b0 if src == 0.

// ConvP2B — branch-free variant (64-bit: cntlzd result shifted by 6, not 5).
instruct convP2Bool_reg__cntlz_Ex(iRegIdst dst, iRegP_N2P src) %{
  match(Set dst (Conv2B src));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI shiftAmount %{ 0x6 %}
    uimmI16 mask %{ 0x1 %}
    iRegIdst tmp1;
    iRegIdst tmp2;
    countLeadingZerosP(tmp1, src);
    urShiftI_reg_imm(tmp2, tmp1, shiftAmount);
    xorI_reg_uimm16(dst, tmp2, mask);
  %}
%}

// ConvP2B — compare-and-branch fallback.
instruct convP2Bool_reg__cmove(iRegIdst dst, iRegP_N2P src, flagsReg crx) %{
  match(Set dst (Conv2B src));
  effect(TEMP crx);
  predicate(!UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CMPDI $crx, $src, #0 \t// convP2B"
            "LI $dst, #0\n\t"
            "BEQ $crx, done\n\t"
            "LI $dst, #1\n"
            "done:" %}
  size(16);
  ins_encode( enc_convP2B_regP__cmove(dst, src, crx, 0x0, 0x1) );
  ins_pipe(pipe_class_compare);
%}

// ConvP2B + XorI — inverted boolean; the Xori cancels as in the integer case.
instruct xorI_convP2Bool_reg__cntlz_Ex(iRegIdst dst, iRegP_N2P src, immI_1 mask) %{
  match(Set dst (XorI (Conv2B src) mask));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI shiftAmount %{ 0x6 %}
    iRegIdst tmp1;
    countLeadingZerosP(tmp1, src);
    urShiftI_reg_imm(dst, tmp1, shiftAmount);
  %}
%}

// ConvP2B + XorI — compare-and-branch fallback (swapped 1/0 constants).
instruct xorI_convP2Bool_reg_immIvalue1__cmove(iRegIdst dst, iRegP_N2P src, flagsReg crx, immI_1 mask) %{
  match(Set dst (XorI (Conv2B src) mask));
  effect(TEMP crx);
  predicate(!UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CMPDI $crx, $src, #0 \t// XorI(convP2B($src), $mask)"
            "LI $dst, #1\n\t"
            "BEQ $crx, done\n\t"
            "LI $dst, #0\n"
            "done:" %}
  size(16);
  ins_encode( enc_convP2B_regP__cmove(dst, src, crx, 0x1, 0x0) );
  ins_pipe(pipe_class_compare);
%}

// if src1 < src2, return -1 else return 0
instruct cmpLTMask_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (CmpLTMask src1 src2));
  ins_cost(DEFAULT_COST*4);

  expand %{
    iRegLdst src1s;
    iRegLdst src2s;
    iRegLdst diff;
    convI2L_reg(src1s, src1); // Ensure proper sign extension.
    convI2L_reg(src2s, src2); // Ensure proper sign extension.
    subL_reg_reg(diff, src1s, src2s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
    signmask64I_regL(dst, diff);
  %}
%}

// Special case src2 == 0: arithmetic shift by 31 smears the sign bit.
// NOTE(review): the format prints $src2 (the matched constant 0), but the
// encoding shifts by 0x1f — the disassembly text is misleading; the
// encoding is the intended behavior.
instruct cmpLTMask_reg_immI0(iRegIdst dst, iRegIsrc src1, immI_0 src2) %{
  match(Set dst (CmpLTMask src1 src2)); // if src1 < src2, return -1 else return 0
  format %{ "SRAWI $dst, $src1, $src2 \t// CmpLTMask" %}
  size(4);
  ins_encode( enc_srawi(dst, src1, 0x1f) );
  ins_pipe(pipe_class_default);
%}

//----------Arithmetic Conversion Instructions---------------------------------

// Convert to Byte -- nop
// Convert to Short -- nop

// Convert to Int

// LShiftI 24 + RShiftI 24 converts byte to int.
instruct convB2I_reg(iRegIdst dst, iRegIsrc src, immI_24 amount) %{
  match(Set dst (RShiftI (LShiftI src amount) amount));
  format %{ "EXTSB $dst, $src \t// byte->int" %}
  size(4);
  ins_encode( enc_extsb(dst, src) );
  ins_pipe(pipe_class_default);
%}

// LShiftI 16 + RShiftI 16 converts short to int.
instruct convS2I_reg(iRegIdst dst, iRegIsrc src, immI_16 amount) %{
  match(Set dst (RShiftI (LShiftI src amount) amount));
  format %{ "EXTSH $dst, $src \t// short->int" %}
  size(4);
  ins_encode( enc_extsh(dst, src) );
  ins_pipe(pipe_class_default);
%}

// ConvL2I + ConvI2L: Sign extend int in long register.
// ConvL2I + ConvI2L folded: sign extend the low 32 bits in place.
instruct sxtI_L2L_reg(iRegLdst dst, iRegLsrc src) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "EXTSW $dst, $src \t// long->long" %}
  size(4);
  ins_encode( enc_extswLL(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Long to int: plain register move (or nothing if dst == src).
instruct convL2I_reg(iRegIdst dst, iRegLsrc src) %{
  match(Set dst (ConvL2I src));
  format %{ "MR $dst, $src \t// long->int" %}
  // variable size, 0 or 4
  ins_encode( enc_mr_if_needed(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Raw double-to-int conversion; only reachable via expand (predicate false).
// Correct only for non-NaN input — the ExEx rule below supplies the NaN check.
instruct convD2IRaw_regD(regD dst, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCTIWZ $dst, $src \t// convD2I, $src != NaN" %}
  size(4);
  ins_encode( enc_fctiwz(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Conditional move of a stack slot into an int register, keyed on the
// summary-overflow bit in crx; only reachable via expand (predicate false).
instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsReg crx, stackSlotL src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "cmovI $crx, $dst, $src" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
  ins_pipe(pipe_class_default);
%}

// Late-expanded wrapper around the cmove above; loads constant 0 on the
// NaN path. Only reachable via expand (predicate false).
instruct cmovI_bso_stackSlotL_conLvalue0_Ex(iRegIdst dst, flagsReg crx, stackSlotL src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  format %{ "CmovI $dst, $crx, $src \t// late expanded" %}
  lateExpand( lateExpand_cmovI_bso_stackSlotL_conLValue0(dst, crx, src) );
%}

// Double to Int conversion, NaN is mapped to 0.
// Double to Int conversion, NaN is mapped to 0: speculatively convert,
// then cmove 0 into dst if the unordered compare flagged NaN.
instruct convD2I_reg_ExEx(iRegIdst dst, regD src) %{
  match(Set dst (ConvD2I src));
  ins_cost(DEFAULT_COST);

  expand %{
    regD tmpD;
    stackSlotL tmpS;
    flagsReg crx;
    cmpDUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
    convD2IRaw_regD(tmpD, src); // Convert float to int (speculated).
    moveD2L_reg_stack(tmpS, tmpD); // Store float to stack (speculated).
    cmovI_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
  %}
%}

// Raw float-to-int conversion; only reachable via expand (predicate false).
// Correct only for non-NaN input — the ExEx rule below supplies the NaN check.
instruct convF2IRaw_regF(regF dst, regF src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCTIWZ $dst, $src \t// convF2I, $src != NaN" %}
  size(4);
  ins_encode( enc_fctiwz(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Float to Int conversion, NaN is mapped to 0.
instruct convF2I_regF_ExEx(iRegIdst dst, regF src) %{
  match(Set dst (ConvF2I src));
  ins_cost(DEFAULT_COST);

  expand %{
    regF tmpF;
    stackSlotL tmpS;
    flagsReg crx;
    cmpFUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
    convF2IRaw_regF(tmpF, src); // Convert float to int (speculated).
    moveF2L_reg_stack(tmpS, tmpF); // Store float to stack (speculated).
    cmovI_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
  %}
%}

// Convert to Long

// Int to long: sign extend.
instruct convI2L_reg(iRegLdst dst, iRegIsrc src) %{
  match(Set dst (ConvI2L src));
  format %{ "EXTSW $dst, $src \t// int->long" %}
  size(4);
  ins_encode( enc_extsw(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Zero-extend: convert unsigned int to long (convUI2L).
// Zero-extend: convert unsigned int to long (convUI2L) — matches the
// idiom (ConvI2L src) & 0xFFFFFFFF.
instruct zeroExtendL_regI(iRegLdst dst, iRegIsrc src, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L src) mask));
  ins_cost(DEFAULT_COST);

  format %{ "CLRLDI $dst, $src, #32 \t// zero-extend int to long" %}
  size(4);
  ins_encode( enc_zeroextendw(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Zero-extend: convert unsigned int to long in long register.
instruct zeroExtendL_regL(iRegLdst dst, iRegLsrc src, immL_32bits mask) %{
  match(Set dst (AndL src mask));
  ins_cost(DEFAULT_COST);

  format %{ "CLRLDI $dst, $src, #32 \t// zero-extend int to long" %}
  size(4);
  ins_encode( enc_zeroextendw(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Raw float-to-long conversion; only reachable via expand (predicate false).
// Correct only for non-NaN input — the ExEx rule below supplies the NaN check.
instruct convF2LRaw_regF(regF dst, regF src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCTIDZ $dst, $src \t// convF2L, $src != NaN" %}
  size(4);
  ins_encode( enc_fctidz(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Conditional move of a stack slot into a long register, keyed on the
// summary-overflow bit in crx; only reachable via expand (predicate false).
instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsReg crx, stackSlotL src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "cmovL $crx, $dst, $src" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
  ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
  ins_pipe(pipe_class_default);
%}

// Late-expanded wrapper around the cmove above; loads constant 0 on the
// NaN path. Only reachable via expand (predicate false).
instruct cmovL_bso_stackSlotL_conLvalue0_Ex(iRegLdst dst, flagsReg crx, stackSlotL src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  format %{ "CmovL $dst, $crx, $src \t// late expanded" %}
  lateExpand( lateExpand_cmovL_bso_stackSlotL_conLvalue0(dst, crx, src) );
%}

// Float to Long conversion, NaN is mapped to 0.
instruct convF2L_reg_ExEx(iRegLdst dst, regF src) %{
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST);

  expand %{
    regF tmpF;
    stackSlotL tmpS;
    flagsReg crx;
    cmpFUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
    convF2LRaw_regF(tmpF, src); // Convert float to long (speculated).
    moveF2L_reg_stack(tmpS, tmpF); // Store float to stack (speculated).
    cmovL_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
  %}
%}

// Raw double-to-long conversion; only reachable via expand (predicate false).
instruct convD2LRaw_regD(regD dst, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCTIDZ $dst, $src \t// convD2L $src != NaN" %}
  size(4);
  ins_encode( enc_fctidz(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Double to Long conversion, NaN is mapped to 0.
instruct convD2L_reg_ExEx(iRegLdst dst, regD src) %{
  match(Set dst (ConvD2L src));
  ins_cost(DEFAULT_COST);

  expand %{
    regD tmpD;
    stackSlotL tmpS;
    flagsReg crx;
    cmpDUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
    convD2LRaw_regD(tmpD, src); // Convert float to long (speculated).
    moveD2L_reg_stack(tmpS, tmpD); // Store float to stack (speculated).
    cmovL_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
  %}
%}

// Convert to Float

// Placed here as needed in expand.
// Raw long-to-double conversion; only reachable via expand (predicate false).
instruct convL2DRaw_regD(regD dst, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCFID $dst, $src \t// convL2D" %}
  size(4);
  ins_encode( enc_fcfid(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Placed here as needed in expand.
// Double to float: round to single precision.
instruct convD2F_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "FRSP $dst, $src \t// convD2F" %}
  size(4);
  ins_encode( enc_frsp(dst, src));
  ins_pipe(pipe_class_default);
%}

// Integer to Float conversion. Fallback path (no fcfids): go through
// memory and double precision, then round to float.
instruct convI2F_ireg_Ex(regF dst, iRegIsrc src) %{
  match(Set dst (ConvI2F src));
  predicate(!VM_Version::has_fcfids());
  ins_cost(DEFAULT_COST);

  expand %{
    iRegLdst tmpL;
    stackSlotL tmpS;
    regD tmpD;
    regD tmpD2;
    convI2L_reg(tmpL, src); // Sign-extension int to long.
    regL_to_stkL(tmpS, tmpL); // Store long to stack.
    moveL2D_stack_reg(tmpD, tmpS); // Load long into double register.
    convL2DRaw_regD(tmpD2, tmpD); // Convert to double.
    convD2F_reg(dst, tmpD2); // Convert double to float.
  %}
%}

// Raw long-to-float conversion; only reachable via expand (predicate false).
instruct convL2FRaw_regF(regF dst, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCFIDS $dst, $src \t// convL2F" %}
  size(4);
  ins_encode( enc_fcfids(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Integer to Float conversion. Special version for Power7.
// Integer to Float using fcfids (Power7+): saves the double->float rounding step.
instruct convI2F_ireg_fcfids_Ex(regF dst, iRegIsrc src) %{
  match(Set dst (ConvI2F src));
  predicate(VM_Version::has_fcfids());
  ins_cost(DEFAULT_COST);

  expand %{
    iRegLdst tmpL;
    stackSlotL tmpS;
    regD tmpD;
    convI2L_reg(tmpL, src);        // Sign-extension int to long.
    regL_to_stkL(tmpS, tmpL);      // Store long to stack.
    moveL2D_stack_reg(tmpD, tmpS); // Load long into double register.
    convL2FRaw_regF(dst, tmpD);    // Convert to float.
  %}
%}

// L2F to avoid runtime call.
instruct convL2F_ireg_fcfids_Ex(regF dst, iRegLsrc src) %{
  match(Set dst (ConvL2F src));
  predicate(VM_Version::has_fcfids());
  ins_cost(DEFAULT_COST);

  expand %{
    stackSlotL tmpS;
    regD tmpD;
    regL_to_stkL(tmpS, src);       // Store long to stack.
    moveL2D_stack_reg(tmpD, tmpS); // Load long into double register.
    convL2FRaw_regF(dst, tmpD);    // Convert to float.
  %}
%}

// Moved up as used in expand.
//instruct convD2F_reg(regF dst, regD src) %{%}

// Convert to Double

// Integer to Double conversion.
instruct convI2D_reg_Ex(regD dst, iRegIsrc src) %{
  match(Set dst (ConvI2D src));
  ins_cost(DEFAULT_COST);

  expand %{
    iRegLdst tmpL;
    stackSlotL tmpS;
    regD tmpD;
    convI2L_reg(tmpL, src);        // Sign-extension int to long.
    regL_to_stkL(tmpS, tmpL);      // Store long to stack.
    moveL2D_stack_reg(tmpD, tmpS); // Load long into double register.
    convL2DRaw_regD(dst, tmpD);    // Convert to double.
  %}
%}

// Long to Double conversion
// Matches the long argument already spilled to a stack slot.
instruct convL2D_reg_Ex(regD dst, stackSlotL src) %{
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST + MEMORY_REF_COST);

  expand %{
    regD tmpD;
    moveL2D_stack_reg(tmpD, src);  // Load long from stack into double register.
    convL2DRaw_regD(dst, tmpD);    // Convert to double.
  %}
%}

// Float to Double: fmr only if dst != src (widening needs no bit change).
instruct convF2D_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "FMR $dst, $src \t// float->double" %}
  // variable size, 0 or 4
  ins_encode( enc_fmr_if_needed(dst, src ));
  ins_pipe(pipe_class_default);
%}

//----------Control Flow Instructions------------------------------------------
// Compare Instructions

// Compare Integers
instruct cmpI_reg_reg(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
  match(Set crx (CmpI src1 src2));
  size(4);
  format %{ "CMPW $crx, $src1, $src2" %}
  ins_encode( enc_cmpw(crx, src1, src2) );
  ins_pipe(pipe_class_compare);
%}

// Compare int against a 16-bit signed immediate.
instruct cmpI_reg_imm16(flagsReg crx, iRegIsrc src1, immI16 src2) %{
  match(Set crx (CmpI src1 src2));
  format %{ "CMPWI $crx, $src1, $src2" %}
  size(4);
  ins_encode( enc_cmpwi(crx, src1, src2) );
  ins_pipe(pipe_class_compare);
%}

// (src1 & src2) == 0?
// Bit-test int against unsigned 16-bit mask: andi. sets CR0, result discarded.
instruct testI_reg_imm(flagsRegCR0 cr0, iRegIsrc src1, uimmI16 src2, immI_0 zero) %{
  match(Set cr0 (CmpI (AndI src1 src2) zero));
  // r0 is killed
  format %{ "ANDI R0, $src1, $src2 \t// BTST int" %}
  size(4);
  ins_encode( enc_andi(R0, src1, src2) );
  ins_pipe(pipe_class_compare);
%}

// Compare two longs, result in condition register crx.
instruct cmpL_reg_reg(flagsReg crx, iRegLsrc src1, iRegLsrc src2) %{
  match(Set crx (CmpL src1 src2));
  format %{ "CMPD $crx, $src1, $src2" %}
  size(4);
  ins_encode( enc_cmpd(crx, src1, src2) );
  ins_pipe(pipe_class_compare);
%}

// Compare long against a 16-bit signed immediate.
instruct cmpL_reg_imm16(flagsReg crx, iRegLsrc src1, immL16 con) %{
  match(Set crx (CmpL src1 con));
  format %{ "CMPDI $crx, $src1, $con" %}
  size(4);
  ins_encode( enc_cmpdi(crx, src1, con) );
  ins_pipe(pipe_class_compare);
%}

// Bit-test two long registers: and. sets CR0, result discarded.
instruct testL_reg_reg(flagsRegCR0 cr0, iRegLsrc src1, iRegLsrc src2, immL_0 zero) %{
  match(Set cr0 (CmpL (AndL src1 src2) zero));
  // r0 is killed
  format %{ "AND R0, $src1, $src2 \t// BTST long" %}
  size(4);
  ins_encode( enc_btst_reg(src1, src2) );
  ins_pipe(pipe_class_compare);
%}

// Bit-test long against unsigned 16-bit mask: andi. sets CR0, result discarded.
instruct testL_reg_imm(flagsRegCR0 cr0, iRegLsrc src1, uimmL16 src2, immL_0 zero) %{
  match(Set cr0 (CmpL (AndL src1 src2) zero));
  // r0 is killed
  format %{ "ANDI R0, $src1, $src2 \t// BTST long" %}
  size(4);
  ins_encode( enc_andi(R0, src1, src2) );
  ins_pipe(pipe_class_compare);
%}

// Materialize -1/0/+1 in dst from condition register crx; expansion helper.
instruct cmovI_conIvalueMinus1_conIvalue1(iRegIdst dst, flagsReg crx) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "cmovI $crx, $dst, -1, 0, +1" %}
  // Worst case is branch + move + branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 20 : 16);
  ins_encode( enc_cmovI_conIvalueMinus1_conIvalue1(dst, crx) );
  ins_pipe(pipe_class_compare);
%}

// Late-expand placeholder for the -1/0/+1 cmove above.
instruct cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(iRegIdst dst, flagsReg crx) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx);
  predicate(false);

  format %{ "CmovI $crx, $dst, -1, 0, +1 \t// late expanded" %}
  lateExpand( lateExpand_cmovI_conIvalueMinus1_conIvalue0_conIvalue1(dst, crx) );
%}

// Manifest a CmpL3 result in an integer register. Very painful.
// This is the test to avoid.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg_ExEx(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(DEFAULT_COST*5+BRANCH_COST);

  expand %{
    flagsReg tmp1;
    cmpL_reg_reg(tmp1, src1, src2);
    cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(dst, tmp1);
  %}
%}

// Implicit range checks.
// A range check in the ideal world has one of the following shapes:
//  - (If le (CmpU length index)), (IfTrue throw exception)
//  - (If lt (CmpU index length)), (IfFalse throw exception)
//
// Match range check 'If le (CmpU length index)'.
11420 instruct rangeCheck_iReg_uimm15(cmpOp cmp, iRegIsrc src_length, uimmI15 index, label labl) %{ 11421 match(If cmp (CmpU src_length index)); 11422 effect(USE labl); 11423 predicate(TrapBasedRangeChecks && 11424 _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le && 11425 PROB_UNLIKELY(_leaf->as_If()->_prob) >= PROB_ALWAYS && 11426 (Matcher::branches_to_uncommon_trap(_leaf))); 11427 11428 ins_is_TrapBasedCheckNode(true); 11429 11430 format %{ "TWI $index $cmp $src_length \t// RangeCheck => trap $labl" %} 11431 size(4); 11432 ins_encode( enc_rangeCheck_le_iReg_uimm15(cmp, src_length, index, labl) ); 11433 ins_pipe(pipe_class_trap); 11434 %} 11435 11436 // Match range check 'If lt (CmpU index length)'. 11437 instruct rangeCheck_iReg_iReg(cmpOp cmp, iRegIsrc src_index, iRegIsrc src_length, label labl) %{ 11438 match(If cmp (CmpU src_index src_length)); 11439 effect(USE labl); 11440 predicate(TrapBasedRangeChecks && 11441 _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt && 11442 _leaf->as_If()->_prob >= PROB_ALWAYS && 11443 (Matcher::branches_to_uncommon_trap(_leaf))); 11444 11445 ins_is_TrapBasedCheckNode(true); 11446 11447 format %{ "TW $src_index $cmp $src_length \t// RangeCheck => trap $labl" %} 11448 size(4); 11449 ins_encode( enc_rangeCheck_ge_iReg_iReg(cmp, src_index, src_length, labl) ); 11450 ins_pipe(pipe_class_trap); 11451 %} 11452 11453 // Match range check 'If lt (CmpU index length)'. 
11454 instruct rangeCheck_uimm15_iReg(cmpOp cmp, iRegIsrc src_index, uimmI15 length, label labl) %{ 11455 match(If cmp (CmpU src_index length)); 11456 effect(USE labl); 11457 predicate(TrapBasedRangeChecks && 11458 _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt && 11459 _leaf->as_If()->_prob >= PROB_ALWAYS && 11460 (Matcher::branches_to_uncommon_trap(_leaf))); 11461 11462 ins_is_TrapBasedCheckNode(true); 11463 11464 format %{ "TWI $src_index $cmp $length \t// RangeCheck => trap $labl" %} 11465 size(4); 11466 ins_encode( enc_rangeCheck_ge_iReg_uimm15(cmp, src_index, length, labl) ); 11467 ins_pipe(pipe_class_trap); 11468 %} 11469 11470 instruct compU_reg_reg(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{ 11471 match(Set crx (CmpU src1 src2)); 11472 format %{ "CMPLW $crx, $src1, $src2 \t// unsigned" %} 11473 size(4); 11474 ins_encode( enc_cmplw(crx, src1, src2) ); 11475 ins_pipe(pipe_class_compare); 11476 %} 11477 11478 instruct compU_reg_uimm16(flagsReg crx, iRegIsrc src1, uimmI16 src2) %{ 11479 match(Set crx (CmpU src1 src2)); 11480 size(4); 11481 format %{ "CMPLWI $crx, $src1, $src2" %} 11482 ins_encode( enc_cmplwi(crx, src1, src2) ); 11483 ins_pipe(pipe_class_compare); 11484 %} 11485 11486 // Implicit zero checks (more implicit null checks). 11487 // No constant pool entries required. 11488 instruct zeroCheckN_iReg_imm0(cmpOp cmp, iRegNsrc value, immN_0 zero, label labl) %{ 11489 match(If cmp (CmpN value zero)); 11490 effect(USE labl); 11491 predicate(TrapBasedNullChecks && 11492 _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne && 11493 _leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) && 11494 Matcher::branches_to_uncommon_trap(_leaf)); 11495 ins_cost(1); 11496 11497 ins_is_TrapBasedCheckNode(true); 11498 11499 format %{ "TDI $value $cmp $zero \t// ZeroCheckN => trap $labl" %} 11500 size(4); 11501 ins_encode( enc_zeroCheckP_eq_iReg(cmp, value, zero, labl) ); 11502 ins_pipe(pipe_class_trap); 11503 %} 11504 11505 // Compare narrow oops. 
// Compare two narrow oops: 32-bit unsigned compare.
instruct cmpN_reg_reg(flagsReg crx, iRegNsrc src1, iRegNsrc src2) %{
  match(Set crx (CmpN src1 src2));

  size(4);
  ins_cost(DEFAULT_COST);
  format %{ "CMPLW $crx, $src1, $src2 \t// compressed ptr" %}
  ins_encode( enc_cmplw(crx, src1, src2) );
  ins_pipe(pipe_class_compare);
%}

// Compare narrow oop against 0.
instruct cmpN_reg_imm0(flagsReg crx, iRegNsrc src1, immN_0 src2) %{
  match(Set crx (CmpN src1 src2));
  // Make this more expensive than zeroCheckN_iReg_imm0.
  ins_cost(DEFAULT_COST);

  format %{ "CMPLWI $crx, $src1, $src2 \t// compressed ptr" %}
  size(4);
  ins_encode( enc_cmplwi(crx, src1, src2) );
  ins_pipe(pipe_class_compare);
%}

// Implicit zero checks (more implicit null checks).
// No constant pool entries required.
instruct zeroCheckP_reg_imm0(cmpOp cmp, iRegP_N2P value, immP_0 zero, label labl) %{
  match(If cmp (CmpP value zero));
  effect(USE labl);
  predicate(TrapBasedNullChecks &&
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne &&
            _leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) &&
            Matcher::branches_to_uncommon_trap(_leaf));

  ins_is_TrapBasedCheckNode(true);

  format %{ "TDI $value $cmp $zero \t// ZeroCheckP => trap $labl" %}
  size(4);
  ins_encode( enc_zeroCheckP_eq_iReg(cmp, value, zero, labl) );
  ins_pipe(pipe_class_trap);
%}

// Compare Pointers
instruct cmpP_reg_reg(flagsReg crx, iRegP_N2P src1, iRegP_N2P src2) %{
  match(Set crx (CmpP src1 src2));
  format %{ "CMPLD $crx, $src1, $src2 \t// ptr" %}
  size(4);
  ins_encode( enc_cmpld(crx, src1, src2) );
  ins_pipe(pipe_class_compare);
%}

// Used in late expand.
instruct cmpP_reg_imm16(flagsReg crx, iRegPsrc src1, immL16 con) %{
  // This match rule prevents reordering of node before a safepoint.
  // This only makes sense if this instructions is used exclusively
  // for the expansion of EncodeP!
  match(Set crx (CmpP src1 con));
  predicate(false);

  format %{ "CMPDI $crx, $src1, $con" %}
  size(4);
  ins_encode( enc_cmpdi(crx, src1, con) );
  ins_pipe(pipe_class_compare);
%}

//----------Float Compares----------------------------------------------------

// Unordered float compare (fcmpu); expansion helper, no match rule.
instruct cmpFUnordered_reg_reg(flagsReg crx, regF src1, regF src2) %{
  // no match-rule, false predicate
  effect(DEF crx, USE src1, USE src2);
  predicate(false);

  format %{ "cmpFUrd $crx, $src1, $src2" %}
  size(4);
  ins_encode( enc_fcmpu(crx, src1, src2) );
  ins_pipe(pipe_class_default);
%}

// Rewrite the 'unordered' outcome in crx as 'less'; expansion helper.
instruct cmov_bns_less(flagsReg crx) %{
  // no match-rule, false predicate
  effect(DEF crx);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "cmov $crx" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 16 : 12);
  ins_encode( enc_cmove_bns_less(crx) );
  ins_pipe(pipe_class_default);
%}

// Compare floating, generate condition code.
instruct cmpF_reg_reg_Ex(flagsReg crx, regF src1, regF src2) %{
  // FIXME: should we match 'If cmp (CmpF src1 src2))' ??
  //
  // The following code sequence occurs a lot in mpegaudio:
  //
  // block BXX:
  // 0: instruct cmpFUnordered_reg_reg (cmpF_reg_reg-0):
  //    cmpFUrd CCR6, F11, F9
  // 4: instruct cmov_bns_less (cmpF_reg_reg-1):
  //    cmov CCR6
  // 8: instruct branchConSched:
  //    B_FARle CCR6, B56 P=0.500000 C=-1.000000
  match(Set crx (CmpF src1 src2));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  format %{ "CmpF $crx, $src1, $src2 \t// late expanded" %}
  lateExpand( lateExpand_cmpF_reg_reg(crx, src1, src2) );
%}

// Compare float, generate -1,0,1
instruct cmpF3_reg_reg_ExEx(iRegIdst dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(DEFAULT_COST*5+BRANCH_COST);

  expand %{
    flagsReg tmp1;
    cmpFUnordered_reg_reg(tmp1, src1, src2);
    cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(dst, tmp1);
  %}
%}

// Unordered double compare (fcmpu); expansion helper, no match rule.
// NOTE(review): format string says "cmpFUrd" although this is the double
// variant — both encode the same fcmpu instruction; affects assembly
// listings only.
instruct cmpDUnordered_reg_reg(flagsReg crx, regD src1, regD src2) %{
  // no match-rule, false predicate
  effect(DEF crx, USE src1, USE src2);
  predicate(false);

  format %{ "cmpFUrd $crx, $src1, $src2" %}
  size(4);
  ins_encode( enc_fcmpu(crx, src1, src2) );
  ins_pipe(pipe_class_default);
%}

instruct cmpD_reg_reg_Ex(flagsReg crx, regD src1, regD src2) %{
  match(Set crx (CmpD src1 src2));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  format %{ "CmpD $crx, $src1, $src2 \t// late expanded" %}
  lateExpand( lateExpand_cmpD_reg_reg(crx, src1, src2) );
%}

// Compare double, generate -1,0,1
instruct cmpD3_reg_reg_ExEx(iRegIdst dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(DEFAULT_COST*5+BRANCH_COST);

  expand %{
    flagsReg tmp1;
    cmpDUnordered_reg_reg(tmp1, src1, src2);
    cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(dst, tmp1);
  %}
%}

//----------Branches---------------------------------------------------------
// Jump

// Direct Branch.
instruct branch(label labl) %{
  match(Goto);
  effect(USE labl);
  ins_cost(BRANCH_COST);

  format %{ "B $labl" %}
  size(4);
  ins_encode( enc_b(labl) );
  ins_pipe(pipe_class_default);
%}

// Conditional Near Branch
instruct branchCon(cmpOp cmp, flagsReg crx, label lbl) %{
  // Same match rule as `branchConFar'.
  match(If cmp crx);
  effect(USE lbl);
  ins_cost(BRANCH_COST);

  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  ins_short_branch(1);

  format %{ "B$cmp $crx, $lbl" %}
  size(4);
  ins_encode( enc_bc(crx, cmp, lbl) );
  ins_pipe(pipe_class_default);
%}

// This is for cases when the ppc64 `bc' instruction does not
// reach far enough. So we emit a far branch here, which is more
// expensive.
//
// Conditional Far Branch
instruct branchConFar(cmpOp cmp, flagsReg crx, label lbl) %{
  // Same match rule as `branchCon'.
  match(If cmp crx);
  effect(USE crx, USE lbl);
  predicate(!false /* TODO: PPC port HB_Schedule*/);
  // Higher cost than `branchCon'.
  ins_cost(5*BRANCH_COST);

  // This is not a short variant of a branch, but the long variant.
  ins_short_branch(0);

  format %{ "B_FAR$cmp $crx, $lbl" %}
  size(8);
  ins_encode( enc_bc_far(crx, cmp, lbl) );
  ins_pipe(pipe_class_default);
%}

// Conditional Branch used with Power6 scheduler (can be far or short).
instruct branchConSched(cmpOp cmp, flagsReg crx, label lbl) %{
  // Same match rule as `branchCon'.
  match(If cmp crx);
  effect(USE crx, USE lbl);
  predicate(false /* TODO: PPC port HB_Schedule*/);
  // Higher cost than `branchCon'.
  ins_cost(5*BRANCH_COST);

  // Actually size doesn't depend on alignment but on shortening.
  ins_variable_size_depending_on_alignment(true);
  // long variant.
  ins_short_branch(0);

  format %{ "B_FAR$cmp $crx, $lbl" %}
  size(8); // worst case
  ins_encode( enc_bc_short_far(crx, cmp, lbl) );
  ins_pipe(pipe_class_default);
%}

// Counted-loop back branch, short variant.
instruct branchLoopEnd(cmpOp cmp, flagsReg crx, label labl) %{
  match(CountedLoopEnd cmp crx);
  effect(USE labl);
  ins_cost(BRANCH_COST);

  // short variant.
  ins_short_branch(1);

  format %{ "B$cmp $crx, $labl \t// counted loop end" %}
  size(4);
  ins_encode( enc_bc(crx, cmp, labl) );
  ins_pipe(pipe_class_default);
%}

// Counted-loop back branch, far variant.
instruct branchLoopEndFar(cmpOp cmp, flagsReg crx, label labl) %{
  match(CountedLoopEnd cmp crx);
  effect(USE labl);
  predicate(!false /* TODO: PPC port HB_Schedule */);
  ins_cost(BRANCH_COST);

  // Long variant.
  ins_short_branch(0);

  format %{ "B_FAR$cmp $crx, $labl \t// counted loop end" %}
  size(8);
  ins_encode( enc_bc_far(crx, cmp, labl) );
  ins_pipe(pipe_class_default);
%}

// Conditional Branch used with Power6 scheduler (can be far or short).
instruct branchLoopEndSched(cmpOp cmp, flagsReg crx, label labl) %{
  match(CountedLoopEnd cmp crx);
  effect(USE labl);
  predicate(false /* TODO: PPC port HB_Schedule */);
  // Higher cost than `branchCon'.
  ins_cost(5*BRANCH_COST);

  // Actually size doesn't depend on alignment but on shortening.
  ins_variable_size_depending_on_alignment(true);
  // Long variant.
  ins_short_branch(0);

  format %{ "B_FAR$cmp $crx, $labl \t// counted loop end" %}
  size(8); // worst case
  ins_encode( enc_bc_short_far(crx, cmp, labl) );
  ins_pipe(pipe_class_default);
%}

// ============================================================================
// Java runtime operations, intrinsics and other complex operations.

// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// not zero for a miss or zero for a hit. The encoding ALSO sets flags.
//
// GL TODO: Improve this.
// - result should not be a TEMP
// - Add match rule as on sparc avoiding additional Cmp.
instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P superklass,
                             iRegPdst tmp_klass, iRegPdst tmp_arrayptr) %{
  match(Set result (PartialSubtypeCheck subklass superklass));
  effect(TEMP result, TEMP tmp_klass, TEMP tmp_arrayptr);
  ins_cost(DEFAULT_COST*10);

  format %{ "PartialSubtypeCheck $result = ($subklass instanceOf $superklass) tmp: $tmp_klass, $tmp_arrayptr" %}
  ins_encode( enc_PartialSubtypeCheck(result, subklass, superklass, tmp_klass, tmp_arrayptr) );
  ins_pipe(pipe_class_default);
%}

// inlined locking and unlocking

instruct cmpFastLock(flagsReg pcc, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
  match(Set pcc (FastLock oop box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %}
  ins_encode( enc_compiler_fast_lock(pcc, oop, box, tmp1, tmp2, tmp3) );
  ins_pipe(pipe_class_compare);
%}

// NOTE(review): format string lists only $tmp1, $tmp2 although $tmp3 is also
// a TEMP and is passed to the encoding; affects assembly listings only.
instruct cmpFastUnlock(flagsReg pcc, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2,
                       iRegPdst tmp3) %{
  match(Set pcc (FastUnlock oop box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "FASTUNLOCK $oop, $box, $tmp1, $tmp2" %}
  ins_encode( enc_compiler_fast_unlock(pcc, oop, box, tmp1, tmp2, tmp3) );
  ins_pipe(pipe_class_compare);
%}

// Align address.
instruct align_addr(iRegPdst dst, iRegPsrc src, immLnegpow2 mask) %{
  match(Set dst (CastX2P (AndL (CastP2X src) mask)));

  format %{ "ANDDI $dst, $src, $mask \t// next aligned address" %}
  size(4);
  ins_encode( enc_andL_reg_immLnegpow2(dst, src, mask) );
  ins_pipe(pipe_class_default);
%}

// Array size computation.
instruct array_size(iRegLdst dst, iRegPsrc end, iRegPsrc start) %{
  match(Set dst (SubL (CastP2X end) (CastP2X start)));

  format %{ "SUB $dst, $end, $start \t// array size in bytes" %}
  size(4);
  ins_encode( enc_subf(dst, start, end));
  ins_pipe(pipe_class_default);
%}

// Clear-array with dynamic array-size.
instruct inlineCallClearArray(rarg1RegL cnt, rarg2RegP base, Universe dummy, regCTR ctr) %{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL ctr);
  ins_cost(MEMORY_REF_COST);

  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.

  format %{ "ClearArray $cnt, $base" %}
  ins_encode( enc_ClearArray_reg_reg(cnt, base) );
  ins_pipe(pipe_class_default);
%}

// String_IndexOf for needle of length 1.
//
// Match needle into immediate operands: no loadConP node needed. Saves one
// register and two instructions over string_indexOf_imm1Node.
//
// Assumes register result differs from all input registers.
//
// Preserves registers haystack, haycnt
// Kills registers tmp1, tmp2
// Defines registers result
//
// Use dst register classes if register gets killed, as it is the case for tmp registers!
//
// Unfortunately this does not match too often. In many situations the AddP is used
// by several nodes, even several StrIndexOf nodes, breaking the match tree.
instruct string_indexOf_imm1_char(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                                  immP needleImm, immL offsetImm, immI_1 needlecntImm,
                                  iRegIdst tmp1, iRegIdst tmp2,
                                  flagsRegCR0 cr0, flagsRegCR1 cr1) %{
  predicate(SpecialStringIndexOf); // type check implicit by parameter type, See Matcher::match_rule_supported
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));

  effect(TEMP result, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1);

  ins_cost(150);
  format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
            "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}

  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted
  ins_encode( enc_String_IndexOf_imm1_char(result, haystack, haycnt, needleImm, offsetImm, tmp1, tmp2) );
  ins_pipe(pipe_class_compare);
%}

// String_IndexOf for needle of length 1.
//
// Special case requires less registers and emits less instructions.
//
// Assumes register result differs from all input registers.
//
// Preserves registers haystack, haycnt
// Kills registers tmp1, tmp2, needle
// Defines registers result
//
// Use dst register classes if register gets killed, as it is the case for tmp registers!
instruct string_indexOf_imm1(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                             rscratch2RegP needle, immI_1 needlecntImm,
                             iRegIdst tmp1, iRegIdst tmp2,
                             flagsRegCR0 cr0, flagsRegCR1 cr1) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
  effect(USE_KILL needle, /* TDEF needle, */ TEMP result,
         TEMP tmp1, TEMP tmp2);
  // Required for EA: check if it is still a type_array.
  predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
  ins_cost(180);

  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.

  format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
            " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
  ins_encode( enc_String_IndexOf_imm1(result, haystack, haycnt, needle, tmp1, tmp2) );
  ins_pipe(pipe_class_compare);
%}

// String_IndexOf.
//
// Length of needle as immediate. This saves instruction loading constant needle
// length.
// @@@ TODO Specify rules for length < 8 or so, and roll out comparison of needle
// completely or do it in vector instruction. This should save registers for
// needlecnt and needle.
//
// Assumes register result differs from all input registers.
// Overwrites haycnt, needlecnt.
// Use dst register classes if register gets killed, as it is the case for tmp registers!
11932 instruct string_indexOf_imm(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, 11933 iRegPsrc needle, uimmI15 needlecntImm, 11934 iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5, 11935 flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6) %{ 11936 match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm))); 11937 effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP result, 11938 TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6); 11939 // Required for EA: check if it is still a type_array. 11940 predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() && 11941 n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array()); 11942 ins_cost(250); 11943 11944 ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted. 11945 11946 format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]" 11947 " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %} 11948 ins_encode( enc_String_IndexOf_imm(result, haystack, haycnt, needle, needlecntImm, tmp1, tmp2, tmp3, tmp4, tmp5) ); 11949 ins_pipe(pipe_class_compare); 11950 %} 11951 11952 // StrIndexOf node. 11953 // 11954 // Assumes register result differs from all input registers. 11955 // Overwrites haycnt, needlecnt. 11956 // Use dst register classes if register gets killed, as it is the case for tmp registers! 
11957 instruct string_indexOf(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt, 11958 iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, 11959 flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6) %{ 11960 match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt))); 11961 effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/ 11962 TEMP result, 11963 TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6); 11964 predicate(SpecialStringIndexOf); // See Matcher::match_rule_supported. 11965 ins_cost(300); 11966 11967 ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted. 11968 11969 format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]" 11970 " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %} 11971 ins_encode( enc_String_IndexOf(result, haystack, haycnt, needle, needlecnt, tmp1, tmp2, tmp3, tmp4) ); 11972 ins_pipe(pipe_class_compare); 11973 %} 11974 11975 // String equals with immediate. 11976 instruct string_equals_imm(iRegPsrc str1, iRegPsrc str2, uimmI15 cntImm, iRegIdst result, 11977 iRegPdst tmp1, iRegPdst tmp2, 11978 flagsRegCR0 cr0, flagsRegCR6 cr6, regCTR ctr) %{ 11979 match(Set result (StrEquals (Binary str1 str2) cntImm)); 11980 effect(TEMP result, TEMP tmp1, TEMP tmp2, 11981 KILL cr0, KILL cr6, KILL ctr); 11982 predicate(SpecialStringEquals); // See Matcher::match_rule_supported. 11983 ins_cost(250); 11984 11985 ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted. 11986 11987 format %{ "String Equals SCL [0..$cntImm]($str1),[0..$cntImm]($str2)" 11988 " -> $result \t// KILL $cr0, $cr6, $ctr, TEMP $result, $tmp1, $tmp2" %} 11989 ins_encode( enc_String_EqualsImm(str1, str2, cntImm, result, tmp1, tmp2) ); 11990 ins_pipe(pipe_class_compare); 11991 %} 11992 11993 // String equals. 
// Use dst register classes if register gets killed, as it is the case for TEMP operands!
instruct string_equals(iRegPsrc str1, iRegPsrc str2, iRegIsrc cnt, iRegIdst result,
                       iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, iRegPdst tmp4, iRegPdst tmp5,
                       flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(TEMP result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         KILL cr0, KILL cr1, KILL cr6, KILL ctr);
  predicate(SpecialStringEquals); // See Matcher::match_rule_supported.
  ins_cost(300);

  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.

  format %{ "String Equals [0..$cnt]($str1),[0..$cnt]($str2) -> $result"
            " \t// KILL $cr0, $cr1, $cr6, $ctr, TEMP $result, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
  ins_encode( enc_String_Equals(str1, str2, cnt, result, tmp1, tmp2, tmp3, tmp4, tmp5) );
  ins_pipe(pipe_class_compare);
%}

// String compare.
// Char[] pointers are passed in.
// Use dst register classes if register gets killed, as it is the case for TEMP operands!
instruct string_compare(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
                        iRegPdst tmp, flagsRegCR0 cr0, regCTR ctr) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL cnt1, USE_KILL cnt2, USE_KILL str1, USE_KILL str2, TEMP result, TEMP tmp, KILL cr0, KILL ctr);
  ins_cost(300);

  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.

  format %{ "String Compare $str1[0..$cnt1], $str2[0..$cnt2] -> $result"
            " \t// TEMP $tmp, $result KILLs $str1, $cnt1, $str2, $cnt2, $cr0, $ctr" %}
  ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result, tmp) );
  ins_pipe(pipe_class_compare);
%}

//---------- Min/Max Instructions ---------------------------------------------

// Branch-free min: widen to 64 bit, take (diff & signmask(diff)) which is
// diff when diff<0 and 0 otherwise, then add back src1.
instruct minI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (MinI src1 src2));
  ins_cost(DEFAULT_COST*6);

  expand %{
    iRegLdst src1s;
    iRegLdst src2s;
    iRegLdst diff;
    iRegLdst sm;
    iRegLdst doz; // difference or zero
    convI2L_reg(src1s, src1); // Ensure proper sign extension.
    convI2L_reg(src2s, src2); // Ensure proper sign extension.
    subL_reg_reg(diff, src2s, src1s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
    signmask64L_regL(sm, diff);
    andL_reg_reg(doz, diff, sm); // <=0
    addI_regL_regL(dst, doz, src1s);
  %}
%}

// Branch-free max: same scheme as minI but andc keeps diff only when diff>=0.
instruct maxI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (MaxI src1 src2));
  ins_cost(DEFAULT_COST*6);

  expand %{
    iRegLdst src1s;
    iRegLdst src2s;
    iRegLdst diff;
    iRegLdst sm;
    iRegLdst doz; // difference or zero
    convI2L_reg(src1s, src1); // Ensure proper sign extension.
    convI2L_reg(src2s, src2); // Ensure proper sign extension.
    subL_reg_reg(diff, src2s, src1s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
    signmask64L_regL(sm, diff);
    andcL_reg_reg(doz, diff, sm); // >=0
    addI_regL_regL(dst, doz, src1s);
  %}
%}

//---------- Population Count Instructions ------------------------------------

// Popcnt for Power7.
// Population count, 32 bit (POPCNTW, available from Power7 on).
instruct popCountI(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (PopCountI src));
  predicate(UsePopCountInstruction && VM_Version::has_popcntw());
  ins_cost(DEFAULT_COST);

  format %{ "POPCNTW $dst, $src" %}
  size(4);
  ins_encode( enc_popcntw(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Popcnt for Power7.
// Population count, 64 bit (POPCNTD).
instruct popCountL(iRegIdst dst, iRegLsrc src) %{
  predicate(UsePopCountInstruction && VM_Version::has_popcntw());
  match(Set dst (PopCountL src));
  ins_cost(DEFAULT_COST);

  format %{ "POPCNTD $dst, $src" %}
  size(4);
  ins_encode( enc_popcntd(dst, src) );
  ins_pipe(pipe_class_default);
%}

instruct countLeadingZerosI(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (CountLeadingZerosI src));
  predicate(UseCountLeadingZerosInstructionsPPC64);  // See Matcher::match_rule_supported.
  ins_cost(DEFAULT_COST);

  format %{ "CNTLZW $dst, $src" %}
  size(4);
  ins_encode( enc_cntlzw(dst, src) );
  ins_pipe(pipe_class_default);
%}

instruct countLeadingZerosL(iRegIdst dst, iRegLsrc src) %{
  match(Set dst (CountLeadingZerosL src));
  predicate(UseCountLeadingZerosInstructionsPPC64);  // See Matcher::match_rule_supported.
  ins_cost(DEFAULT_COST);

  format %{ "CNTLZD $dst, $src" %}
  size(4);
  ins_encode( enc_cntlzd(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Pointer variant of CNTLZD. Only reachable via explicit expansion
// (no match rule, predicate(false)).
instruct countLeadingZerosP(iRegIdst dst, iRegPsrc src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "CNTLZD $dst, $src" %}
  size(4);
  ins_encode( enc_cntlzd(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Count trailing zeros via the identity ctz(x) = 32 - clz(~x & (x - 1)):
// tmpI1 = src - 1, tmpI2 = ~src & tmpI1 (isolates the trailing zero bits),
// then subtract the leading-zero count from 32.
instruct countTrailingZerosI_Ex(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (CountTrailingZerosI src));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI16 imm1 %{ (int)-1 %}
    immI16 imm2 %{ (int)32 %}
    immI_minus1 m1 %{ -1 %}
    iRegIdst tmpI1;
    iRegIdst tmpI2;
    iRegIdst tmpI3;
    addI_reg_imm16(tmpI1, src, imm1);
    andcI_reg_reg(tmpI2, src, m1, tmpI1);
    countLeadingZerosI(tmpI3, tmpI2);
    subI_imm16_reg(dst, imm2, tmpI3);
  %}
%}

// 64-bit version of the same trick: ctz(x) = 64 - clz((x - 1) & ~x).
instruct countTrailingZerosL_Ex(iRegIdst dst, iRegLsrc src) %{
  match(Set dst (CountTrailingZerosL src));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immL16 imm1 %{ (long)-1 %}
    immI16 imm2 %{ (int)64 %}
    iRegLdst tmpL1;
    iRegLdst tmpL2;
    iRegIdst tmpL3;
    addL_reg_imm16(tmpL1, src, imm1);
    andcL_reg_reg(tmpL2, tmpL1, src);
    countLeadingZerosL(tmpL3, tmpL2);
    subI_imm16_reg(dst, imm2, tmpL3);
  %}
%}

// Expand nodes for byte_reverse_int.
12168 instruct insrwi_a(iRegIdst dst, iRegIsrc src, immI16 imm1, immI16 imm2) %{ 12169 effect(DEF dst, USE src, USE imm1, USE imm2); 12170 predicate(false); 12171 12172 format %{ "INSRWI $dst, $src, $imm1, $imm2" %} 12173 size(4); 12174 ins_encode( enc_insrwi(dst, imm1, src, imm2) ); 12175 ins_pipe(pipe_class_default); 12176 %} 12177 12178 // As insrwi_a, but with USE_DEF. 12179 instruct insrwi(iRegIdst dst, iRegIsrc src, immI16 imm1, immI16 imm2) %{ 12180 effect(USE_DEF dst, USE src, USE imm1, USE imm2); 12181 predicate(false); 12182 12183 format %{ "INSRWI $dst, $src, $imm1, $imm2" %} 12184 size(4); 12185 ins_encode( enc_insrwi(dst, imm1, src, imm2) ); 12186 ins_pipe(pipe_class_default); 12187 %} 12188 12189 // Just slightly faster than java implementation. 12190 instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{ 12191 match(Set dst (ReverseBytesI src)); 12192 predicate(UseCountLeadingZerosInstructionsPPC64); 12193 ins_cost(DEFAULT_COST); 12194 12195 expand %{ 12196 immI16 imm24 %{ (int) 24 %} 12197 immI16 imm16 %{ (int) 16 %} 12198 immI16 imm8 %{ (int) 8 %} 12199 immI16 imm4 %{ (int) 4 %} 12200 immI16 imm0 %{ (int) 0 %} 12201 iRegLdst tmpI1; 12202 iRegLdst tmpI2; 12203 iRegLdst tmpI3; 12204 12205 urShiftI_reg_imm(tmpI1, src, imm24); 12206 insrwi_a(dst, tmpI1, imm24, imm8); 12207 urShiftI_reg_imm(tmpI2, src, imm16); 12208 insrwi(dst, tmpI2, imm8, imm16); 12209 urShiftI_reg_imm(tmpI3, src, imm8); 12210 insrwi(dst, tmpI3, imm8, imm8); 12211 insrwi(dst, src, imm0, imm8); 12212 %} 12213 %} 12214 12215 //---------- Replicate Vector Instructions ------------------------------------ 12216 12217 // Insrdi does replicate if src == dst. 12218 instruct repl32(iRegLdst dst) %{ 12219 predicate(false); 12220 effect(USE_DEF dst); 12221 12222 format %{ "INSRDI $dst, #0, $dst, #32 \t// replicate" %} 12223 size(4); 12224 ins_encode( enc_insrdi(dst, (0), dst, (32)) ); 12225 ins_pipe(pipe_class_default); 12226 %} 12227 12228 // Insrdi does replicate if src == dst. 
// Copies the low 16 bits of $dst over bits 16..31 (replicate step).
instruct repl48(iRegLdst dst) %{
  predicate(false);
  effect(USE_DEF dst);

  format %{ "INSRDI $dst, #0, $dst, #48 \t// replicate" %}
  size(4);
  ins_encode( enc_insrdi(dst, (0), dst, (48)) );
  ins_pipe(pipe_class_default);
%}

// Insrdi does replicate if src == dst.
// Copies the low 8 bits of $dst over bits 8..15 (replicate step).
instruct repl56(iRegLdst dst) %{
  predicate(false);
  effect(USE_DEF dst);

  format %{ "INSRDI $dst, #0, $dst, #56 \t// replicate" %}
  size(4);
  ins_encode( enc_insrdi(dst, (0), dst, (56)) );
  ins_pipe(pipe_class_default);
%}

// Replicate a byte 8 times across a long register: successive doubling
// via repl56 (8->16 bits), repl48 (16->32), repl32 (32->64).
instruct repl8B_reg_Ex(iRegLdst dst, iRegIsrc src) %{
  match(Set dst (ReplicateB src));
  predicate(n->as_Vector()->length() == 8);
  expand %{
    moveReg(dst, src);
    repl56(dst);
    repl48(dst);
    repl32(dst);
  %}
%}

instruct repl8B_immI0(iRegLdst dst, immI_0 zero) %{
  match(Set dst (ReplicateB zero));
  predicate(n->as_Vector()->length() == 8);
  format %{ "LI $dst, #0 \t// replicate8B" %}
  size(4);
  ins_encode( enc_li(dst, zero) );
  ins_pipe(pipe_class_default);
%}

instruct repl8B_immIminus1(iRegLdst dst, immI_minus1 src) %{
  match(Set dst (ReplicateB src));
  predicate(n->as_Vector()->length() == 8);
  format %{ "LI $dst, #-1 \t// replicate8B" %}
  size(4);
  ins_encode( enc_li(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Replicate a short 4 times across a long register.
instruct repl4S_reg_Ex(iRegLdst dst, iRegIsrc src) %{
  match(Set dst (ReplicateS src));
  predicate(n->as_Vector()->length() == 4);
  expand %{
    moveReg(dst, src);
    repl48(dst);
    repl32(dst);
  %}
%}

instruct repl4S_immI0(iRegLdst dst, immI_0 zero) %{
  match(Set dst (ReplicateS zero));
  predicate(n->as_Vector()->length() == 4);
  // Format label fixed: was "replicate4C" (copy-paste).
  format %{ "LI $dst, #0 \t// replicate4S" %}
  size(4);
  ins_encode( enc_li(dst, zero) );
  ins_pipe(pipe_class_default);
%}

instruct repl4S_immIminus1(iRegLdst dst, immI_minus1 src) %{
  match(Set dst (ReplicateS src));
  predicate(n->as_Vector()->length() == 4);
  // Format label fixed: was "replicate4C" (copy-paste).
  format %{ "LI $dst, -1 \t// replicate4S" %}
  size(4);
  ins_encode( enc_li(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Replicate an int 2 times across a long register.
instruct repl2I_reg_Ex(iRegLdst dst, iRegIsrc src) %{
  match(Set dst (ReplicateI src));
  predicate(n->as_Vector()->length() == 2);
  ins_cost(2 * DEFAULT_COST);
  expand %{
    moveReg(dst, src);
    repl32(dst);
  %}
%}

instruct repl2I_immI0(iRegLdst dst, immI_0 zero) %{
  match(Set dst (ReplicateI zero));
  predicate(n->as_Vector()->length() == 2);
  // Format label fixed: was "replicate4C" (copy-paste).
  format %{ "LI $dst, #0 \t// replicate2I" %}
  size(4);
  ins_encode( enc_li(dst, zero) );
  ins_pipe(pipe_class_default);
%}

instruct repl2I_immIminus1(iRegLdst dst, immI_minus1 src) %{
  match(Set dst (ReplicateI src));
  predicate(n->as_Vector()->length() == 2);
  // Format label fixed: was "replicate4C" (copy-paste).
  format %{ "LI $dst, -1 \t// replicate2I" %}
  size(4);
  ins_encode( enc_li(dst, src) );
  ins_pipe(pipe_class_default);
%}

// Move float to int register via stack, replicate.
instruct repl2F_reg_Ex(iRegLdst dst, regF src) %{
  match(Set dst (ReplicateF src));
  predicate(n->as_Vector()->length() == 2);
  ins_cost(2 * MEMORY_REF_COST + DEFAULT_COST);
  expand %{
    stackSlotL tmpS;
    iRegIdst tmpI;
    moveF2I_reg_stack(tmpS, src);   // Move float to stack.
    moveF2I_stack_reg(tmpI, tmpS);  // Move stack to int reg.
    moveReg(dst, tmpI);             // Move int to long reg.
    repl32(dst);                    // Replicate bitpattern.
  %}
%}

// Replicate scalar constant to packed float values in Double register
instruct repl2F_immF_Ex(iRegLdst dst, immF src) %{
  match(Set dst (ReplicateF src));
  predicate(n->as_Vector()->length() == 2);
  ins_cost(5 * DEFAULT_COST);

  format %{ "LD $dst, offset, $constanttablebase\t// load replicated float $src $src from table, late expanded" %}
  lateExpand( lateExpand_load_replF_constant(dst, src, constanttablebase) );
%}

// Replicate scalar zero constant to packed float values in Double register
instruct repl2F_immF0(iRegLdst dst, immF_0 zero) %{
  match(Set dst (ReplicateF zero));
  predicate(n->as_Vector()->length() == 2);

  format %{ "LI $dst, #0 \t// replicate2F" %}
  ins_encode( enc_li(dst, 0x0) );
  ins_pipe(pipe_class_default);
%}

// ============================================================================
// Safepoint Instruction

// Safepoint poll: load from the polling page whose address is held in $poll
// (per-thread polling; see predicate).
instruct safePoint_poll(iRegPdst poll) %{
  match(SafePoint poll);
  predicate(LoadPollAddressFromThread);

  // It caused problems to add the effect that r0 is killed, but this
  // effect no longer needs to be mentioned, since r0 is not contained
  // in a reg_class.

  format %{ "LD R0, #0, $poll \t// Safepoint poll for GC" %}
  size(4);
  ins_encode( enc_poll(0x0, poll) );
  ins_pipe(pipe_class_default);
%}

// Load constant address of polling page into register.
// This node shall not match: use immP_NM!
instruct loadConPollAddr(rscratch2RegP poll, immP_NM src) %{
  match(Set poll src);
  format %{ "LD $poll, addr of polling page \t// load constant optimized for safepoint.\n" %}

  // Size depends on constant.
  ins_encode( enc_loadConPollAddr(poll) );
  ins_pipe(pipe_class_default);
%}

// Safepoint without per-thread support.
// Load address of page to poll as constant.
// Rscratch2RegP is R12.
// LoadConPollAddr node is added in pd_post_matching_hook(). It must be
// a separate node so that the oop map is at the right location.
instruct safePoint_poll_conPollAddr(rscratch2RegP poll) %{
  match(SafePoint poll);
  predicate(!LoadPollAddressFromThread);

  // It caused problems to add the effect that r0 is killed, but this
  // effect no longer needs to be mentioned, since r0 is not contained
  // in a reg_class.

  format %{ "LD R12, addr of polling page\n\t"
            "LD R0, #0, R12 \t// Safepoint poll for GC" %}
  ins_encode( enc_poll(0x0, poll) );
  ins_pipe(pipe_class_default);
%}

// ============================================================================
// Call Instructions

// Call Java Static Instruction

// Schedulable version of call static node.
// Matches the non-method-handle case; see CallStaticJavaDirectHandle below
// for the method-handle counterpart.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);
  predicate(!((CallStaticJavaNode*)n)->is_method_handle_invoke());
  ins_cost(CALL_COST);

  ins_num_consts(3 /* up to 3 patchable constants: inline cache, 2 call targets. */);

  format %{ "CALL,static $meth \t// ==> " %}
  size(4);
  ins_encode( enc_java_static_call(meth) );
  ins_pipe(pipe_class_call);
%}

// Schedulable version of call static node.
// Method-handle invoke variant (uses enc_java_handle_call, no fixed size).
instruct CallStaticJavaDirectHandle(method meth) %{
  match(CallStaticJava);
  effect(USE meth);
  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
  ins_cost(CALL_COST);

  ins_num_consts(3 /* up to 3 patchable constants: inline cache, 2 call targets. */);

  format %{ "CALL,static $meth \t// ==> " %}
  ins_encode( enc_java_handle_call(meth) );
  ins_pipe(pipe_class_call);
%}

// Call Java Dynamic Instruction

// Used by late expand of CallDynamicJavaDirectSchedEx (actual call).
// Loading of IC was late expanded. The nodes loading the IC are reachable
// via fields ins_field_load_ic_hi_node and ins_field_load_ic_node.
// The call destination must still be placed in the constant pool.
instruct CallDynamicJavaDirectSched(method meth) %{
  match(CallDynamicJava); // To get all the data fields we need ...
  effect(USE meth);
  predicate(false);       // ... but never match.

  ins_field_load_ic_hi_node(loadConL_hiNode*);
  ins_field_load_ic_node(loadConLNode*);
  ins_num_consts(1 /* 1 patchable constant: call destination */);

  format %{ "BL \t// dynamic $meth ==> " %}
  size(4);
  ins_encode( enc_java_dynamic_call_sched(meth) );
  ins_pipe(pipe_class_call);
%}

// Schedulable (i.e. late expanded) version of call dynamic java.
// We use late expanded calls if we use inline caches
// and do not update method data.
//
// This instruction has two constants: inline cache (IC) and call destination.
// Loading the inline cache will be late expanded, thus leaving a call with
// one constant.
instruct CallDynamicJavaDirectSched_Ex(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);
  predicate(UseInlineCaches);
  ins_cost(CALL_COST);

  ins_requires_toc(true);
  ins_num_consts(2 /* 2 patchable constants: inline cache, call destination. */);

  format %{ "CALL,dynamic $meth \t// late expanded" %}
  lateExpand( lateExpand_java_dynamic_call_sched(meth) );
%}

// Compound version of call dynamic java
// We use late expanded calls if we use inline caches
// and do not update method data.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);
  predicate(!UseInlineCaches);
  ins_cost(CALL_COST);

  // Enc_java_to_runtime_call needs up to 4 constants (method data oop).
  ins_num_consts(4);
  ins_requires_toc(true);

  format %{ "CALL,dynamic $meth \t// ==> " %}
  ins_encode( enc_java_dynamic_call(meth) );
  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime);
  effect(USE meth);
  ins_cost(CALL_COST);

  // Enc_java_to_runtime_call needs up to 3 constants: call target,
  // env for callee, C-toc.
  ins_num_consts(3);

  format %{ "CALL,runtime" %}
  ins_encode( enc_java_to_runtime_call(meth) );
  ins_pipe(pipe_class_call);
%}

// Call Leaf

// Used by late expand of CallLeafDirectSched (mtctr).
// Moves the call target into the count register in preparation for BCTRL.
instruct CallLeafDirect_mtctr(iRegLdst dst, iRegLsrc src) %{
  effect(DEF dst, USE src);

  ins_num_consts(1);

  format %{ "MTCTR $src" %}
  size(4);
  ins_encode( enc_leaf_call_mtctr(src) );
  ins_pipe(pipe_class_default);
%}

// Used by late expand of CallLeafDirectSched (actual call).
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);   // To get all the data fields we need ...
  effect(USE meth);
  predicate(false);  // but never match.

  format %{ "BCTRL \t// leaf call $meth ==> " %}
  size(4);
  ins_encode( enc_leaf_call(meth) );
  ins_pipe(pipe_class_call);
%}

// Late expand of CallLeafDirect.
// Load address to call from TOC, then bl to it.
instruct CallLeafDirect_Ex(method meth) %{
  match(CallLeaf);
  effect(USE meth);
  ins_cost(CALL_COST);

  // Enc_java_to_runtime_call needs up to 3 constants: call target,
  // env for callee, C-toc.
  ins_num_consts(3);
  ins_requires_toc(true);

  format %{ "CALL,runtime leaf $meth \t// late expanded" %}
  lateExpand( lateExpand_java_to_runtime_call(meth) );
%}

// Call runtime without safepoint - same as CallLeaf.
// Late expand of CallLeafNoFPDirect.
// Load address to call from TOC, then bl to it.
instruct CallLeafNoFPDirect_Ex(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);
  ins_cost(CALL_COST);

  // Enc_java_to_runtime_call needs up to 3 constants: call target,
  // env for callee, C-toc.
  ins_num_consts(3);
  ins_requires_toc(true);

  format %{ "CALL,runtime leaf nofp $meth \t// late expanded" %}
  lateExpand( lateExpand_java_to_runtime_call(meth) );
%}

// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPdstNoScratch jump_target, inline_cache_regP method_oop) %{
  match(TailCall jump_target method_oop);
  ins_cost(CALL_COST);

  format %{ "MTCTR $jump_target \t// $method_oop holds method oop\n\t"
            "BCTR \t// tail call" %}
  size(8);
  ins_encode( enc_form_jmpl(jump_target) );
  ins_pipe(pipe_class_call);
%}

// Return Instruction
instruct Ret() %{
  match(Return);
  format %{ "BLR \t// branch to link register" %}
  size(4);
  ins_encode( enc_Ret() );
  ins_pipe(pipe_class_default);
%}

// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
// NOTE(review): the %o0/%i0 wording is SPARC terminology inherited from the
// original comment; on PPC the exception oop lives in R4_ARG2 (rarg1RegP) —
// confirm against enc_jump_set_exception_pc.
instruct tailjmpInd(iRegPdstNoScratch jump_target, rarg1RegP ex_oop) %{
  match(TailJump jump_target ex_oop);
  ins_cost(CALL_COST);

  format %{ "LD R4_ARG2 = LR\n\t"
            "MTCTR $jump_target\n\t"
            "BCTR \t// TailJump, exception oop: $ex_oop" %}
  size(12);
  ins_encode( enc_jump_set_exception_pc(jump_target) );
  ins_pipe(pipe_class_call);
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException(rarg1RegP ex_oop) %{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  format %{ " -- \t// exception oop; no code emitted" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "Jmp rethrow_stub" %}
  ins_encode( enc_rethrow() );
  ins_pipe(pipe_class_call);
%}

// Die now.
instruct ShouldNotReachHere() %{
  match(Halt);
  ins_cost(CALL_COST);

  format %{ "ShouldNotReachHere" %}
  size(4);
  ins_encode( enc_shouldnotreachhere() );
  ins_pipe(pipe_class_default);
%}

// This name is KNOWN by the ADLC and cannot be changed. The ADLC
// forces a 'TypeRawPtr::BOTTOM' output type for this guy.
// Get a DEF on threadRegP, no costs, no encoding, use
// 'ins_should_rematerialize(true)' to avoid spilling.
instruct tlsLoadP(threadRegP dst) %{
  match(Set dst (ThreadLocal));
  ins_cost(0);

  ins_should_rematerialize(true);

  format %{ " -- \t// $dst=Thread::current(), empty" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_empty);
%}

//---Some PPC specific nodes---------------------------------------------------

// Stop a group.
instruct endGroup() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "End Bundle (ori r1, r1, 0)" %}
  size(4);
  ins_encode( enc_end_bundle() );
  ins_pipe(pipe_class_default);
%}

// Nop instructions

instruct fxNop() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "fxNop" %}
  size(4);
  ins_encode( enc_fxnop() );
  ins_pipe(pipe_class_default);
%}

instruct fpNop0() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "fpNop0" %}
  size(4);
  ins_encode( enc_fpnop0() );
  ins_pipe(pipe_class_default);
%}

instruct fpNop1() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "fpNop1" %}
  size(4);
  ins_encode( enc_fpnop1() );
  ins_pipe(pipe_class_default);
%}

instruct brNop0() %{
  ins_cost(0);
  size(4);
  format %{ "brNop0" %}
  ins_encode( enc_brnop0() );
  ins_is_nop(true);
  ins_pipe(pipe_class_default);
%}

instruct brNop1() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "brNop1" %}
  size(4);
  ins_encode( enc_brnop1() );
  ins_pipe(pipe_class_default);
%}

instruct brNop2() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "brNop2" %}
  size(4);
  ins_encode( enc_brnop2() );
  ins_pipe(pipe_class_default);
%}

//----------PEEPHOLE RULES-----------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.
//
// peepmatch ( root_instr_name [preceding_instruction]* );
//
// peepconstraint %{
// (instruction_number.operand_name relational_op instruction_number.operand_name
//  [, ...]
//  );
// // instruction numbers are zero-based using left to right order in peepmatch
//
// peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
// // provide an instruction_number.operand_name for each operand that appears
// // in the replacement instruction's match rule
//
// ---------VM FLAGS---------------------------------------------------------
//
// All peephole optimizations can be turned off using -XX:-OptoPeephole
//
// Each peephole rule is given an identifying number starting with zero and
// increasing by one in the order seen by the parser. An individual peephole
// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
// on the command-line.
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only match adjacent instructions in same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == EAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(eRegI dst, eRegI src) %{
//   match(Set dst (CopyI src));
// %}
//
// instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
//   match(Set dst (AddI dst src));
//   effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
//   // increment preceded by register-register move
//   peepmatch ( incI_eReg movI );
//   // require that the destination register of the increment
//   // match the destination register of the move
//   peepconstraint ( 0.dst == 1.dst );
//   // construct a replacement instruction that sets
//   // the destination to ( move's source register + one )
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// Implementation no longer uses movX instructions since
// machine-independent system no longer uses CopyX nodes.
//
// peephole %{
//   peepmatch ( incI_eReg movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// peephole %{
//   peepmatch ( decI_eReg movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// peephole %{
//   peepmatch ( addI_eReg_imm movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// peephole %{
//   peepmatch ( addP_eReg_imm movP );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
// %}

// // Change load of spilled value to only a spill
// instruct storeI(memory mem, eRegI src) %{
//   match(Set mem (StoreI mem src));
// %}
//
// instruct loadI(eRegI dst, memory mem) %{
//   match(Set dst (LoadI mem));
// %}
//

// Replace a load of a just-stored value by a re-store of that value
// (load of spilled value -> spill only).
peephole %{
  peepmatch ( loadI storeI );
  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
  peepreplace ( storeI( 1.mem 1.mem 1.src ) );
%}

peephole %{
  peepmatch ( loadL storeL );
  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
  peepreplace ( storeL( 1.mem 1.mem 1.src ) );
%}

// NOTE(review): this rule previously used '1.dst == 0.mem' and
// 'storeP( 1.dst 1.dst 1.src )', but storeP's operands are (mem, src) —
// it has no 'dst' (cf. the storeI exemplar above). Made consistent with
// the loadI/loadL rules.
peephole %{
  peepmatch ( loadP storeP );
  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
  peepreplace ( storeP( 1.mem 1.mem 1.src ) );
%}

//----------SMARTSPILL RULES---------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.