//
// Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// AMD64 Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.

register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.

// General Registers
// R8-R15 must be encoded with REX.  (RSP, RBP, RSI, RDI need REX when
// used as byte registers)

// Previously set RBX, RSI, and RDI as save-on-entry for java code
// Turn off SOE in java-code due to frequent use of uncommon-traps.
// Now that allocator is better, turn on RSI and RDI as SOE registers.

reg_def RAX  (SOC, SOC, Op_RegI, 0, rax->as_VMReg());
reg_def RAX_H(SOC, SOC, Op_RegI, 0, rax->as_VMReg()->next());

reg_def RCX  (SOC, SOC, Op_RegI, 1, rcx->as_VMReg());
reg_def RCX_H(SOC, SOC, Op_RegI, 1, rcx->as_VMReg()->next());

reg_def RDX  (SOC, SOC, Op_RegI, 2, rdx->as_VMReg());
reg_def RDX_H(SOC, SOC, Op_RegI, 2, rdx->as_VMReg()->next());

reg_def RBX  (SOC, SOE, Op_RegI, 3, rbx->as_VMReg());
reg_def RBX_H(SOC, SOE, Op_RegI, 3, rbx->as_VMReg()->next());

// RSP is the stack pointer: never allocatable, never saved/restored (NS).
reg_def RSP  (NS,  NS,  Op_RegI, 4, rsp->as_VMReg());
reg_def RSP_H(NS,  NS,  Op_RegI, 4, rsp->as_VMReg()->next());

// now that adapter frames are gone RBP is always saved and restored by the prolog/epilog code
reg_def RBP  (NS,  SOE, Op_RegI, 5, rbp->as_VMReg());
reg_def RBP_H(NS,  SOE, Op_RegI, 5, rbp->as_VMReg()->next());

#ifdef _WIN64

// Windows x64 C convention treats RSI and RDI as callee-saved (SOE).
reg_def RSI  (SOC, SOE, Op_RegI, 6, rsi->as_VMReg());
reg_def RSI_H(SOC, SOE, Op_RegI, 6, rsi->as_VMReg()->next());

reg_def RDI  (SOC, SOE, Op_RegI, 7, rdi->as_VMReg());
reg_def RDI_H(SOC, SOE, Op_RegI, 7, rdi->as_VMReg()->next());

#else

// System V AMD64 treats RSI and RDI as caller-saved (SOC).
reg_def RSI  (SOC, SOC, Op_RegI, 6, rsi->as_VMReg());
reg_def RSI_H(SOC, SOC, Op_RegI, 6, rsi->as_VMReg()->next());

reg_def RDI  (SOC, SOC, Op_RegI, 7, rdi->as_VMReg());
reg_def RDI_H(SOC, SOC, Op_RegI, 7, rdi->as_VMReg()->next());

#endif

reg_def R8   (SOC, SOC, Op_RegI,  8, r8->as_VMReg());
reg_def R8_H (SOC, SOC, Op_RegI,  8, r8->as_VMReg()->next());

reg_def R9   (SOC, SOC, Op_RegI,  9, r9->as_VMReg());
reg_def R9_H (SOC, SOC, Op_RegI,  9, r9->as_VMReg()->next());

reg_def R10  (SOC, SOC, Op_RegI, 10, r10->as_VMReg());
reg_def R10_H(SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());

reg_def R11  (SOC, SOC, Op_RegI, 11, r11->as_VMReg());
reg_def R11_H(SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());

reg_def R12  (SOC, SOE, Op_RegI, 12, r12->as_VMReg());
reg_def R12_H(SOC, SOE, Op_RegI, 12, r12->as_VMReg()->next());

reg_def R13  (SOC, SOE, Op_RegI, 13, r13->as_VMReg());
reg_def R13_H(SOC, SOE, Op_RegI, 13, r13->as_VMReg()->next());

reg_def R14  (SOC, SOE, Op_RegI, 14, r14->as_VMReg());
reg_def R14_H(SOC, SOE, Op_RegI, 14, r14->as_VMReg()->next());

reg_def R15  (SOC, SOE, Op_RegI, 15, r15->as_VMReg());
reg_def R15_H(SOC, SOE, Op_RegI, 15, r15->as_VMReg()->next());


// Floating Point Registers

// XMM registers.  128-bit registers or 4 words each, labeled (a)-d.
// Word a in each register holds a Float, words ab hold a Double.  We
// currently do not use the SIMD capabilities, so registers cd are
// unused at the moment.
// XMM8-XMM15 must be encoded with REX.
// Linux ABI:   No register preserved across function calls
//              XMM0-XMM7 might hold parameters
// Windows ABI: XMM6-XMM15 preserved across function calls
//              XMM0-XMM3 might hold parameters

reg_def XMM0   (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
reg_def XMM0_H (SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next());

reg_def XMM1   (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
reg_def XMM1_H (SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next());

reg_def XMM2   (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
reg_def XMM2_H (SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next());

reg_def XMM3   (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
reg_def XMM3_H (SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next());

reg_def XMM4   (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
reg_def XMM4_H (SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next());

reg_def XMM5   (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
reg_def XMM5_H (SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next());

#ifdef _WIN64

// Windows ABI: XMM6-XMM15 are preserved across calls, hence SOE.
reg_def XMM6   (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
reg_def XMM6_H (SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next());

reg_def XMM7   (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
reg_def XMM7_H (SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next());

reg_def XMM8   (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
reg_def XMM8_H (SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next());

reg_def XMM9   (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
reg_def XMM9_H (SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next());

reg_def XMM10  (SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
reg_def XMM10_H(SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next());

reg_def XMM11  (SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
reg_def XMM11_H(SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next());

reg_def XMM12  (SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
reg_def XMM12_H(SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next());

reg_def XMM13  (SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
reg_def XMM13_H(SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next());

reg_def XMM14  (SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
reg_def XMM14_H(SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next());

reg_def XMM15  (SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
reg_def XMM15_H(SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next());

#else

// Linux/System V ABI: no XMM register is preserved across calls (all SOC).
reg_def XMM6   (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
reg_def XMM6_H (SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next());

reg_def XMM7   (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
reg_def XMM7_H (SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next());

reg_def XMM8   (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
reg_def XMM8_H (SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next());

reg_def XMM9   (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
reg_def XMM9_H (SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next());

reg_def XMM10  (SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
reg_def XMM10_H(SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next());

reg_def XMM11  (SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
reg_def XMM11_H(SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next());

reg_def XMM12  (SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
reg_def XMM12_H(SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next());

reg_def XMM13  (SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
reg_def XMM13_H(SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next());

reg_def XMM14  (SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
reg_def XMM14_H(SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next());

reg_def XMM15  (SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
reg_def XMM15_H(SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next());

#endif // _WIN64

// Condition-code register; has no VMReg backing (VMRegImpl::Bad()).
reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());

// Specify priority of register selection within phases of register
// allocation.  Highest priority is first.  A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry.  Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.

alloc_class chunk0(R10,  R10_H,
                   R11,  R11_H,
                   R8,   R8_H,
                   R9,   R9_H,
                   R12,  R12_H,
                   RCX,  RCX_H,
                   RBX,  RBX_H,
                   RDI,  RDI_H,
                   RDX,  RDX_H,
                   RSI,  RSI_H,
                   RAX,  RAX_H,
                   RBP,  RBP_H,
                   R13,  R13_H,
                   R14,  R14_H,
                   R15,  R15_H,
                   RSP,  RSP_H);

// XXX probably use 8-15 first on Linux
alloc_class chunk1(XMM0,  XMM0_H,
                   XMM1,  XMM1_H,
                   XMM2,  XMM2_H,
                   XMM3,  XMM3_H,
                   XMM4,  XMM4_H,
                   XMM5,  XMM5_H,
                   XMM6,  XMM6_H,
                   XMM7,  XMM7_H,
                   XMM8,  XMM8_H,
                   XMM9,  XMM9_H,
                   XMM10, XMM10_H,
                   XMM11, XMM11_H,
                   XMM12, XMM12_H,
                   XMM13, XMM13_H,
                   XMM14, XMM14_H,
                   XMM15, XMM15_H);

alloc_class chunk2(RFLAGS);


//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg    ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// Class for all pointer registers (including RSP)
reg_class any_reg(RAX, RAX_H,
                  RDX, RDX_H,
                  RBP, RBP_H,
                  RDI, RDI_H,
                  RSI, RSI_H,
                  RCX, RCX_H,
                  RBX, RBX_H,
                  RSP, RSP_H,
                  R8,  R8_H,
                  R9,  R9_H,
                  R10, R10_H,
                  R11, R11_H,
                  R12, R12_H,
                  R13, R13_H,
                  R14, R14_H,
                  R15, R15_H);

// Class for all pointer registers except RSP
// (R12 and R15 are also omitted; R15 holds the TLS pointer -- see
// ptr_r15_reg below.  R12's reservation is presumably intentional as
// well -- TODO confirm against the frame section.)
reg_class ptr_reg(RAX, RAX_H,
                  RDX, RDX_H,
                  RBP, RBP_H,
                  RDI, RDI_H,
                  RSI, RSI_H,
                  RCX, RCX_H,
                  RBX, RBX_H,
                  R8,  R8_H,
                  R9,  R9_H,
                  R10, R10_H,
                  R11, R11_H,
                  R13, R13_H,
                  R14, R14_H);

// Class for all pointer registers except RAX and RSP
reg_class ptr_no_rax_reg(RDX, RDX_H,
                         RBP, RBP_H,
                         RDI, RDI_H,
                         RSI, RSI_H,
                         RCX, RCX_H,
                         RBX, RBX_H,
                         R8,  R8_H,
                         R9,  R9_H,
                         R10, R10_H,
                         R11, R11_H,
                         R13, R13_H,
                         R14, R14_H);

// Class for all pointer registers except RBP and RSP
reg_class ptr_no_rbp_reg(RDX, RDX_H,
                         RAX, RAX_H,
                         RDI, RDI_H,
                         RSI, RSI_H,
                         RCX, RCX_H,
                         RBX, RBX_H,
                         R8,  R8_H,
                         R9,  R9_H,
                         R10, R10_H,
                         R11, R11_H,
                         R13, R13_H,
                         R14, R14_H);

// Class for all pointer registers except RAX, RBX and RSP
reg_class ptr_no_rax_rbx_reg(RDX, RDX_H,
                             RBP, RBP_H,
                             RDI, RDI_H,
                             RSI, RSI_H,
                             RCX, RCX_H,
                             R8,  R8_H,
                             R9,  R9_H,
                             R10, R10_H,
                             R11, R11_H,
                             R13, R13_H,
                             R14, R14_H);

// Singleton class for RAX pointer register
reg_class ptr_rax_reg(RAX, RAX_H);

// Singleton class for RBX pointer register
reg_class ptr_rbx_reg(RBX, RBX_H);

// Singleton class for RSI pointer register
reg_class ptr_rsi_reg(RSI, RSI_H);

// Singleton class for RDI pointer register
reg_class ptr_rdi_reg(RDI, RDI_H);

// Singleton class for RBP pointer register
reg_class ptr_rbp_reg(RBP, RBP_H);

// Singleton class for stack pointer
reg_class ptr_rsp_reg(RSP, RSP_H);

// Singleton class for TLS pointer
reg_class ptr_r15_reg(R15, R15_H);

// Class for all long registers (except RSP)
reg_class long_reg(RAX, RAX_H,
                   RDX, RDX_H,
                   RBP, RBP_H,
                   RDI, RDI_H,
                   RSI, RSI_H,
                   RCX, RCX_H,
                   RBX, RBX_H,
                   R8,  R8_H,
                   R9,  R9_H,
                   R10, R10_H,
                   R11, R11_H,
                   R13, R13_H,
                   R14, R14_H);

// Class for all long registers except RAX, RDX (and RSP)
reg_class long_no_rax_rdx_reg(RBP, RBP_H,
                              RDI, RDI_H,
                              RSI, RSI_H,
                              RCX, RCX_H,
                              RBX, RBX_H,
                              R8,  R8_H,
                              R9,  R9_H,
                              R10, R10_H,
                              R11, R11_H,
                              R13, R13_H,
                              R14, R14_H);

// Class for all long registers except RCX (and RSP)
reg_class long_no_rcx_reg(RBP, RBP_H,
                          RDI, RDI_H,
                          RSI, RSI_H,
                          RAX, RAX_H,
                          RDX, RDX_H,
                          RBX, RBX_H,
                          R8,  R8_H,
                          R9,  R9_H,
                          R10, R10_H,
                          R11, R11_H,
                          R13, R13_H,
                          R14, R14_H);

// Class for all long registers except RAX (and RSP)
reg_class long_no_rax_reg(RBP, RBP_H,
                          RDX, RDX_H,
                          RDI, RDI_H,
                          RSI, RSI_H,
                          RCX, RCX_H,
                          RBX, RBX_H,
                          R8,  R8_H,
                          R9,  R9_H,
                          R10, R10_H,
                          R11, R11_H,
                          R13, R13_H,
                          R14, R14_H);

// Singleton class for RAX long register
reg_class long_rax_reg(RAX, RAX_H);

// Singleton class for RCX long register
reg_class long_rcx_reg(RCX, RCX_H);

// Singleton class for RDX long register
reg_class long_rdx_reg(RDX, RDX_H);

// Class for all int registers (except RSP)
reg_class int_reg(RAX,
                  RDX,
                  RBP,
                  RDI,
                  RSI,
                  RCX,
                  RBX,
                  R8,
                  R9,
                  R10,
                  R11,
                  R13,
                  R14);

// Class for all int registers except RCX (and RSP)
reg_class int_no_rcx_reg(RAX,
                         RDX,
                         RBP,
                         RDI,
                         RSI,
                         RBX,
                         R8,
                         R9,
                         R10,
                         R11,
                         R13,
                         R14);

// Class for all int registers except RAX, RDX (and RSP)
reg_class int_no_rax_rdx_reg(RBP,
                             RDI,
                             RSI,
                             RCX,
                             RBX,
                             R8,
                             R9,
                             R10,
                             R11,
                             R13,
                             R14);

// Singleton class for RAX int register
reg_class int_rax_reg(RAX);

// Singleton class for RBX int register
reg_class int_rbx_reg(RBX);

// Singleton class for RCX int register
reg_class int_rcx_reg(RCX);

// Singleton class for RDX int register
reg_class int_rdx_reg(RDX);

// Singleton class for RDI int register
reg_class int_rdi_reg(RDI);

// Singleton class for instruction pointer
// reg_class ip_reg(RIP);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);

// Class for all float registers
reg_class float_reg(XMM0,
                    XMM1,
                    XMM2,
                    XMM3,
                    XMM4,
                    XMM5,
                    XMM6,
                    XMM7,
                    XMM8,
                    XMM9,
                    XMM10,
                    XMM11,
                    XMM12,
                    XMM13,
                    XMM14,
                    XMM15);

// Class for all double registers
reg_class double_reg(XMM0,  XMM0_H,
                     XMM1,  XMM1_H,
                     XMM2,  XMM2_H,
                     XMM3,  XMM3_H,
                     XMM4,  XMM4_H,
                     XMM5,  XMM5_H,
                     XMM6,  XMM6_H,
                     XMM7,  XMM7_H,
                     XMM8,  XMM8_H,
                     XMM9,  XMM9_H,
                     XMM10, XMM10_H,
                     XMM11, XMM11_H,
                     XMM12, XMM12_H,
                     XMM13, XMM13_H,
                     XMM14, XMM14_H,
                     XMM15, XMM15_H);
%}


//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source %{
#define RELOC_IMM64    Assembler::imm_operand
#define RELOC_DISP32   Assembler::disp32_operand

#define __ _masm.

// Size in bytes of the "mov rbp, rsp" emitted to preserve SP at
// method-handle call sites (see CallStaticJavaHandleNode::compute_padding).
static int preserve_SP_size() {
  return LP64_ONLY(1 +) 2;  // [rex,] op, rm(reg/reg)
}

// !!!!!
// Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.

// Distance in bytes from the start of a static Java call to the return
// address pushed by it.  Method-handle invokes are preceded by an extra
// "mov rbp, rsp" (preserve_SP_size()) that must be counted as well.
int MachCallStaticJavaNode::ret_addr_offset()
{
  int offset = 5; // 5 bytes from start of call to where return address points
  if (_method_handle_invoke)
    offset += preserve_SP_size();
  return offset;
}

// Dynamic (inline-cache) calls carry a larger fixed-size call sequence.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 15; // 15 bytes from start of call to where return address points
}

// In os_cpu .ad file
// int MachCallRuntimeNode::ret_addr_offset()

// Indicate if the safepoint node needs the polling page as an input,
// it does if the polling page is more than disp32 away.
bool SafePointNode::needs_polling_address_input()
{
  return Assembler::is_polling_page_far();
}

//
// Compute padding required for nodes which need alignment
//

// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
// Returns the number of padding bytes to insert before the call so that
// the displacement field (which follows the 1-byte opcode) is aligned.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const
{
  current_offset += 1; // skip call opcode byte
  return round_to(current_offset, alignment_required()) - current_offset;
}

// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
// Same as above, but also accounts for the preserved-SP move emitted
// before a method-handle call.
int CallStaticJavaHandleNode::compute_padding(int current_offset) const
{
  current_offset += preserve_SP_size(); // skip mov rbp, rsp
  current_offset += 1; // skip call opcode byte
  return round_to(current_offset, alignment_required()) - current_offset;
}

// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
// Dynamic calls are preceded by a movq (inline-cache load); count it plus
// the call opcode byte before rounding up to the required alignment.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{
  current_offset += 11; // skip movq instruction + call opcode byte
  return round_to(current_offset, alignment_required()) - current_offset;
}

#ifndef PRODUCT
// Disassembly text for a breakpoint node (the x86 INT3 instruction).
void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
{
  st->print("INT3");
}
#endif

// EMIT_RM()
// Emit a ModRM (or SIB) byte: mode in bits 7-6, reg/opcode in bits 5-3,
// r/m in bits 2-0.
void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
  unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
  cbuf.insts()->emit_int8(c);
}

// EMIT_CC()
// Emit an opcode byte with a condition code OR'd into its low bits.
void emit_cc(CodeBuffer &cbuf, int f1, int f2) {
  unsigned char c = (unsigned char) (f1 | f2);
  cbuf.insts()->emit_int8(c);
}

// EMIT_OPCODE()
// Emit a single opcode byte.
void emit_opcode(CodeBuffer &cbuf, int code) {
  cbuf.insts()->emit_int8((unsigned char) code);
}

// EMIT_OPCODE() w/ relocation information
// Record a relocation at (insts_mark + offset), then emit the opcode byte.
void emit_opcode(CodeBuffer &cbuf,
                 int code, relocInfo::relocType reloc, int offset, int format)
{
  cbuf.relocate(cbuf.insts_mark() + offset, reloc, format);
  emit_opcode(cbuf, code);
}

// EMIT_D8()
void emit_d8(CodeBuffer &cbuf, int d8) {
  cbuf.insts()->emit_int8((unsigned char) d8);
}

// EMIT_D16()
void emit_d16(CodeBuffer &cbuf, int d16) {
  cbuf.insts()->emit_int16(d16);
}

// EMIT_D32()
void emit_d32(CodeBuffer &cbuf, int d32) {
  cbuf.insts()->emit_int32(d32);
}

// EMIT_D64()
void emit_d64(CodeBuffer &cbuf, int64_t d64) {
  cbuf.insts()->emit_int64(d64);
}

// emit 32 bit value and construct relocation entry from relocInfo::relocType
void emit_d32_reloc(CodeBuffer& cbuf,
                    int d32,
                    relocInfo::relocType reloc,
                    int format)
{
  assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc");
  cbuf.relocate(cbuf.insts_mark(), reloc, format);
  cbuf.insts()->emit_int32(d32);
}

// emit 32 bit value and construct relocation
// entry from RelocationHolder
void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, int format) {
#ifdef ASSERT
  // Embedded oops in code must not be scavengable (unless explicitly allowed).
  if (rspec.reloc()->type() == relocInfo::oop_type &&
      d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
    assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
  }
#endif
  cbuf.relocate(cbuf.insts_mark(), rspec, format);
  cbuf.insts()->emit_int32(d32);
}

// Emit a RIP-relative 32-bit displacement to 'addr' (relative to the
// instruction pointer *after* the 4-byte field) with an external-word reloc.
void emit_d32_reloc(CodeBuffer& cbuf, address addr) {
  address next_ip = cbuf.insts_end() + 4;
  emit_d32_reloc(cbuf, (int) (addr - next_ip),
                 external_word_Relocation::spec(addr),
                 RELOC_DISP32);
}


// emit 64 bit value and construct relocation entry from relocInfo::relocType
void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, relocInfo::relocType reloc, int format) {
  cbuf.relocate(cbuf.insts_mark(), reloc, format);
  cbuf.insts()->emit_int64(d64);
}

// emit 64 bit value and construct relocation entry from RelocationHolder
void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) {
#ifdef ASSERT
  // Same scavengable-oop check as the 32-bit variant above.
  if (rspec.reloc()->type() == relocInfo::oop_type &&
      d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
    assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
           "cannot embed scavengable oops in code");
  }
#endif
  cbuf.relocate(cbuf.insts_mark(), rspec, format);
  cbuf.insts()->emit_int64(d64);
}

// Access stack slot for load or store
// Emits the ModRM/SIB/displacement bytes for an RSP-relative access,
// choosing the 8-bit displacement form when disp fits in a signed byte.
void store_to_stackslot(CodeBuffer &cbuf, int opcode, int rm_field, int disp)
{
  emit_opcode(cbuf, opcode);                  // (e.g., FILD [RSP+src])
  if (-0x80 <= disp && disp < 0x80) {
    emit_rm(cbuf, 0x01, rm_field, RSP_enc);   // R/M byte
    emit_rm(cbuf, 0x00, RSP_enc, RSP_enc);    // SIB byte
    emit_d8(cbuf, disp);                      // Displacement  // R/M byte
  } else {
    emit_rm(cbuf, 0x02, rm_field, RSP_enc);   // R/M byte
    emit_rm(cbuf, 0x00, RSP_enc, RSP_enc);    // SIB byte
    emit_d32(cbuf, disp);                     // Displacement  // R/M byte
  }
}

// rRegI ereg, memory mem)  %{    // emit_reg_mem
// Emit the ModRM/SIB/displacement bytes addressing [base + index*scale + disp]
// for register 'reg'.  Encodings are taken modulo 8 (the REX bits are emitted
// by the caller).  index == 0x4 means "no index" (0x4 is not encodable as an
// index in a SIB byte); base == RBP/R13 with mode 0 would mean RIP/disp32, so
// those bases always get an explicit displacement.
// NOTE(review): disp_is_oop is asserted false on entry, so every
// "if (disp_is_oop)" branch below is currently dead code -- confirm before
// relying on the oop-reloc paths.
void encode_RegMem(CodeBuffer &cbuf,
                   int reg,
                   int base, int index, int scale, int disp, bool disp_is_oop)
{
  assert(!disp_is_oop, "cannot have disp");
  int regenc = reg & 7;
  int baseenc = base & 7;
  int indexenc = index & 7;

  // There is no index & no scale, use form without SIB byte
  if (index == 0x4 && scale == 0 && base != RSP_enc && base != R12_enc) {
    // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
    if (disp == 0 && base != RBP_enc && base != R13_enc) {
      emit_rm(cbuf, 0x0, regenc, baseenc); // *
    } else if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
      // If 8-bit displacement, mode 0x1
      emit_rm(cbuf, 0x1, regenc, baseenc); // *
      emit_d8(cbuf, disp);
    } else {
      // If 32-bit displacement
      if (base == -1) { // Special flag for absolute address
        emit_rm(cbuf, 0x0, regenc, 0x5); // *
        if (disp_is_oop) {
          emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
        } else {
          emit_d32(cbuf, disp);
        }
      } else {
        // Normal base + offset
        emit_rm(cbuf, 0x2, regenc, baseenc); // *
        if (disp_is_oop) {
          emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
        } else {
          emit_d32(cbuf, disp);
        }
      }
    }
  } else {
    // Else, encode with the SIB byte
    // If no displacement, mode is 0x0; unless base is [RBP] or [R13]
    if (disp == 0 && base != RBP_enc && base != R13_enc) {
      // If no displacement
      emit_rm(cbuf, 0x0, regenc, 0x4); // *
      emit_rm(cbuf, scale, indexenc, baseenc);
    } else {
      if (-0x80 <= disp && disp < 0x80 && !disp_is_oop) {
        // If 8-bit displacement, mode 0x1
        emit_rm(cbuf, 0x1, regenc, 0x4); // *
        emit_rm(cbuf, scale, indexenc, baseenc);
        emit_d8(cbuf, disp);
      } else {
        // If 32-bit displacement
        if (base == 0x04 ) {
          emit_rm(cbuf, 0x2, regenc, 0x4);
          emit_rm(cbuf, scale, indexenc, 0x04); // XXX is this valid???
        } else {
          emit_rm(cbuf, 0x2, regenc, 0x4);
          emit_rm(cbuf, scale, indexenc, baseenc); // *
        }
        if (disp_is_oop) {
          emit_d32_reloc(cbuf, disp, relocInfo::oop_type, RELOC_DISP32);
        } else {
          emit_d32(cbuf, disp);
        }
      }
    }
  }
}

// Emit a register-to-register integer move (opcode 0x8B) from srcenc to
// dstenc, prefixing with the appropriate REX byte when either encoding
// is >= 8 (R8-R15).  Nothing is emitted for a self-move.
void encode_copy(CodeBuffer &cbuf, int dstenc, int srcenc)
{
  if (dstenc != srcenc) {
    if (dstenc < 8) {
      if (srcenc >= 8) {
        emit_opcode(cbuf, Assembler::REX_B);
        srcenc -= 8;
      }
    } else {
      if (srcenc < 8) {
        emit_opcode(cbuf, Assembler::REX_R);
      } else {
        emit_opcode(cbuf, Assembler::REX_RB);
        srcenc -= 8;
      }
      dstenc -= 8;
    }

    emit_opcode(cbuf, 0x8B);
    emit_rm(cbuf, 0x3, dstenc, srcenc);
  }
}

// XMM-to-XMM copy via movdqa; a self-copy emits nothing.
void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
  if( dst_encoding == src_encoding ) {
    // reg-reg copy, use an empty encoding
  } else {
    MacroAssembler _masm(&cbuf);

    __ movdqa(as_XMMRegister(dst_encoding), as_XMMRegister(src_encoding));
  }
}

// This could be in MacroAssembler but it's fairly C2 specific
// Patches up the flags after a floating-point compare: when the parity
// flag indicates an unordered result, rewrite the saved flags on the
// stack with the 0xffffff2b mask before popping them back.
void emit_cmpfp_fixup(MacroAssembler& _masm) {
  Label exit;
  __ jccb(Assembler::noParity, exit);
  __ pushf();
  __ andq(Address(rsp, 0), 0xffffff2b);
  __ popf();
  __ bind(exit);
  __ nop(); // (target for branch to avoid branch to branch)
}


//=============================================================================
const bool Matcher::constant_table_absolute_addressing = true;
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

// The constant-table base needs no code on amd64 (absolute addressing).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
// Debug printing for the (empty) constant-table base node.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("# MachConstantBaseNode (empty encoding)");
}
#endif


//=============================================================================
#ifndef PRODUCT
// Pretty-print the method prolog; must mirror the code path taken by
// MachPrologNode::emit() below so disassembly listings stay truthful.
void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  Compile* C = ra_->C;

  int framesize = C->frame_slots() << LogBytesPerInt;
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  // Remove wordSize for return adr already pushed
  // and another for the RBP we are going to save
  framesize -= 2*wordSize;
  bool need_nop = true;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be
  // careful, because some VM calls (such as call site linkage) can
  // use several kilobytes of stack. But the stack safety zone should
  // account for that. See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    st->print_cr("# stack bang"); st->print("\t");
    need_nop = false;
  }
  st->print_cr("pushq rbp"); st->print("\t");

  if (VerifyStackAtCalls) {
    // Majik cookie to verify stack depth
    st->print_cr("pushq 0xffffffffbadb100d"
                 "\t# Majik cookie for stack depth check");
    st->print("\t");
    framesize -= wordSize; // Remove 2 for cookie
    need_nop = false;
  }

  if (framesize) {
    st->print("subq rsp, #%d\t# Create frame", framesize);
    if (framesize < 0x80 && need_nop) {
      st->print("\n\tnop\t# nop for patch_verified_entry");
    }
  }
}
#endif

// Emit the compiled-method prolog: optional stack-bang, pushq rbp,
// optional stack-depth cookie, and the frame allocation (subq rsp).
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
{
  Compile* C = ra_->C;

  // WARNING: Initial instruction MUST be 5 bytes or longer so that
  // NativeJump::patch_verified_entry will be able to patch out the entry
  // code safely. The fldcw is ok at 6 bytes, the push to verify stack
  // depth is ok at 5 bytes, the frame allocation can be either 3 or
  // 6 bytes. So if we don't do the fldcw or the push then we must
  // use the 6 byte frame allocation even if we have no frame. :-(
  // If method sets FPU control word do it now

  int framesize = C->frame_slots() << LogBytesPerInt;
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  // Remove wordSize for return adr already pushed
  // and another for the RBP we are going to save
  framesize -= 2*wordSize;
  // need_nop tracks whether the leading instructions are still too short
  // for patch_verified_entry; a trailing nop pads the short subq form.
  bool need_nop = true;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be
  // careful, because some VM calls (such as call site linkage) can
  // use several kilobytes of stack. But the stack safety zone should
  // account for that. See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    MacroAssembler masm(&cbuf);
    masm.generate_stack_overflow_check(framesize);
    need_nop = false;
  }

  // We always push rbp so that on return to interpreter rbp will be
  // restored correctly and we can correct the stack.
  emit_opcode(cbuf, 0x50 | RBP_enc);

  if (VerifyStackAtCalls) {
    // Majik cookie to verify stack depth
    emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
    emit_d32(cbuf, 0xbadb100d);
    framesize -= wordSize; // Remove 2 for cookie
    need_nop = false;
  }

  if (framesize) {
    emit_opcode(cbuf, Assembler::REX_W);
    if (framesize < 0x80) {
      // 8-bit-immediate form (3 bytes) — may need the nop pad, see WARNING.
      emit_opcode(cbuf, 0x83); // sub SP,#framesize
      emit_rm(cbuf, 0x3, 0x05, RSP_enc);
      emit_d8(cbuf, framesize);
      if (need_nop) {
        emit_opcode(cbuf, 0x90); // nop
      }
    } else {
      // 32-bit-immediate form (6 bytes) — long enough on its own.
      emit_opcode(cbuf, 0x81); // sub SP,#framesize
      emit_rm(cbuf, 0x3, 0x05, RSP_enc);
      emit_d32(cbuf, framesize);
    }
  }

  C->set_frame_complete(cbuf.insts_size());

#ifdef ASSERT
  if (VerifyStackAtCalls) {
    // Verify rsp is StackAlignmentInBytes-aligned (minus the pushed word).
    Label L;
    MacroAssembler masm(&cbuf);
    masm.push(rax);
    masm.mov(rax, rsp);
    masm.andptr(rax, StackAlignmentInBytes-1);
    masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
    masm.pop(rax);
    masm.jcc(Assembler::equal, L);
    masm.stop("Stack is not properly aligned!");
    masm.bind(L);
  }
#endif
}

uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachPrologNode::reloc() const
{
  return 0; // a large enough number
}

//=============================================================================
#ifndef PRODUCT
// Pretty-print the epilog; mirrors MachEpilogNode::emit() below.
void MachEpilogNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  // Remove word for return adr already pushed
  // and RBP
  framesize -= 2*wordSize;

  if (framesize) {
    st->print_cr("addq rsp, %d\t# Destroy frame", framesize);
    st->print("\t");
  }

  st->print_cr("popq rbp");
  if (do_polling() && C->is_method_compilation()) {
    st->print("\t");
    if (Assembler::is_polling_page_far()) {
      st->print_cr("movq rscratch1, #polling_page_address\n\t"
                   "testl rax, [rscratch1]\t"
                   "# Safepoint: poll for GC");
    } else {
      st->print_cr("testl rax, [rip + #offset_to_poll_page]\t"
                   "# Safepoint: poll for GC");
    }
  }
}
#endif

// Emit the epilog: tear down the frame (addq rsp), pop rbp, and — for
// method compilations that poll — issue the return safepoint poll (a
// testl against the polling page, rip-relative when it is in reach).
void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  Compile* C = ra_->C;
  int framesize = C->frame_slots() << LogBytesPerInt;
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
  // Remove word for return adr already pushed
  // and RBP
  framesize -= 2*wordSize;

  // Note that VerifyStackAtCalls' Majik cookie does not change the frame size popped here

  if (framesize) {
    emit_opcode(cbuf, Assembler::REX_W);
    if (framesize < 0x80) {
      emit_opcode(cbuf, 0x83); // addq rsp, #framesize
      emit_rm(cbuf, 0x3, 0x00, RSP_enc);
      emit_d8(cbuf, framesize);
    } else {
      emit_opcode(cbuf, 0x81); // addq rsp, #framesize
      emit_rm(cbuf, 0x3, 0x00, RSP_enc);
      emit_d32(cbuf, framesize);
    }
  }

  // popq rbp
  emit_opcode(cbuf, 0x58 | RBP_enc);

  if (do_polling() && C->is_method_compilation()) {
    MacroAssembler _masm(&cbuf);
    AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
    if (Assembler::is_polling_page_far()) {
      __ lea(rscratch1, polling_page);
      __ relocate(relocInfo::poll_return_type);
      __ testl(rax, Address(rscratch1, 0));
    } else {
      __ testl(rax, polling_page);
    }
  }
}

uint MachEpilogNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

int MachEpilogNode::reloc() const
{
  return 2; // a large enough number
}
const Pipeline* MachEpilogNode::pipeline() const
{
  return MachNode::pipeline_class();
}

int MachEpilogNode::safepoint_offset() const
{
  return 0;
}

//=============================================================================

// Coarse register classes used to dispatch spill-copy encodings.
enum RC {
  rc_bad,
  rc_int,
  rc_float,
  rc_stack
};

// Classify an allocator register name as stack slot, GPR, or XMM.
static enum RC rc_class(OptoReg::Name reg)
{
  if( !OptoReg::is_valid(reg) ) return rc_bad;

  if (OptoReg::is_stack(reg)) return rc_stack;

  VMReg r = OptoReg::as_VMReg(reg);

  if (r->is_Register()) return rc_int;

  assert(r->is_XMMRegister(), "must be");
  return rc_float;
}

// Shared worker for spill-copy format/emit/size. Exactly one of three
// modes runs per call:
//   cbuf != NULL            — emit the machine code into cbuf,
//   cbuf == NULL, !do_size  — print assembly-like text to st,
//   cbuf == NULL, do_size   — emit nothing (return value is the size).
// In every mode the return value is the encoded length in bytes, so the
// byte-count arithmetic below must stay in lock-step with the bytes
// emitted — do not change one without the other.
// A pair (first, second) that is even-aligned and adjacent denotes one
// 64-bit value; otherwise the copy is a 32-bit (single-slot) move.
uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
                                       PhaseRegAlloc* ra_,
                                       bool do_size,
                                       outputStream* st) const
{

  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this);
  OptoReg::Name dst_first = ra_->get_reg_first(this);

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first),
         "must move at least 1 register" );

  if (src_first == dst_first && src_second == dst_second) {
    // Self copy, no move
    return 0;
  } else if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // pushq [rsp + src_offset] (FF /6); popq [rsp + dst_offset] (8F /0)
          emit_opcode(*cbuf, 0xFF);
          encode_RegMem(*cbuf, RSI_enc, RSP_enc, 0x4, 0, src_offset, false);

          emit_opcode(*cbuf, 0x8F);
          encode_RegMem(*cbuf, RAX_enc, RSP_enc, 0x4, 0, dst_offset, false);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("pushq [rsp + #%d]\t# 64-bit mem-mem spill\n\t"
                    "popq [rsp + #%d]",
                    src_offset,
                    dst_offset);
#endif
        }
        return
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) +
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4));
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        // bounce through rax, preserving it in the red zone at [rsp - 8].
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          // movq [rsp - 8], rax  (hard-coded 48 89 44 24 F8)
          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x89);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

          // movl eax, [rsp + src_offset]
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, src_offset,
                        false);

          // movl [rsp + dst_offset], eax
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        RAX_enc,
                        RSP_enc, 0x4, 0, dst_offset,
                        false);

          // movq rax, [rsp - 8]  (restore)
          emit_opcode(*cbuf, Assembler::REX_W);
          emit_opcode(*cbuf, 0x8B);
          emit_opcode(*cbuf, 0x44);
          emit_opcode(*cbuf, 0x24);
          emit_opcode(*cbuf, 0xF8);

#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq [rsp - #8], rax\t# 32-bit mem-mem spill\n\t"
                    "movl rax, [rsp + #%d]\n\t"
                    "movl [rsp + #%d], rax\n\t"
                    "movq rax, [rsp - #8]",
                    src_offset,
                    dst_offset);
#endif
        }
        return
          5 + // movq
          3 + ((src_offset == 0) ? 0 : (src_offset < 0x80 ? 1 : 4)) + // movl
          3 + ((dst_offset == 0) ? 0 : (dst_offset < 0x80 ? 1 : 4)) + // movl
          5; // movq
      }
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq %s, [rsp + #%d]\t# spill",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x8B);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl %s, [rsp + #%d]\t# spill",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movsd (F2 0F 10) or movlpd (66 0F 12)
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, [rsp + #%d]\t# spill",
                    UseXmmLoadAndClearUpper ? "movsd " : "movlpd",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit: movss (F3 0F 10)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x10);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[dst_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss %s, [rsp + #%d]\t# spill",
                    Matcher::regName[dst_first],
                    offset);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[dst_first] < 8)
           ? 5
           : 6); // REX
      }
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          if (Matcher::_regEncode[src_first] < 8) {
            emit_opcode(*cbuf, Assembler::REX_W);
          } else {
            emit_opcode(*cbuf, Assembler::REX_WR);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) + 4; // REX
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x89);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 3
           : 4); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movq dst, src (REX.W + 8B /r)
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movq %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return 3; // REX
      } else {
        // 32-bit: movl dst, src (8B /r, REX only when a high register is used)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x8B);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movl %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 2
          : 3; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movdq xmm, r64 (66 REX.W 0F 6E /r)
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WB);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WR);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit: movdl xmm, r32 (66 0F 6E /r)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x6E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movsd [mem], xmm (F2 0F 11 /r)
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF2);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movsd [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      } else {
        // 32-bit: movss [mem], xmm (F3 0F 11 /r)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[src_first] >= 8) {
            emit_opcode(*cbuf, Assembler::REX_R);
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x11);
          encode_RegMem(*cbuf,
                        Matcher::_regEncode[src_first],
                        RSP_enc, 0x4, 0, offset,
                        false);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movss [rsp + #%d], %s\t# spill",
                    offset,
                    Matcher::regName[src_first]);
#endif
        }
        return
          ((offset == 0) ? 0 : (offset < 0x80 ? 1 : 4)) +
          ((Matcher::_regEncode[src_first] < 8)
           ? 5
           : 6); // REX
      }
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movdq r64, xmm (66 REX.W 0F 7E /r).
        // For 0F 7E the xmm source sits in the ModRM *reg* field and the
        // gpr in the r/m field, so REX.R/REX.B pick the opposite operands
        // compared with the other cases — hence the "attention!" swaps.
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_W);
            } else {
              emit_opcode(*cbuf, Assembler::REX_WR); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_WB); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_WRB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[src_first] & 7,
                  Matcher::_regEncode[dst_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdq %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return 5; // REX
      } else {
        // 32-bit: movdl r32, xmm (66 0F 7E /r) — same operand swap as above.
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          emit_opcode(*cbuf, 0x66);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_R); // attention!
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_B); // attention!
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, 0x7E);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[src_first] & 7,
                  Matcher::_regEncode[dst_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("movdl %s, %s\t# spill",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      }
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: movapd (66 0F 28) or movsd (F2 0F 10)
        if (cbuf) {
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x66 : 0xF2);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, %s\t# spill",
                    UseXmmRegToRegMoveAll ? "movapd" : "movsd ",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? 4
          : 5; // REX
      } else {
        // 32-bit: movaps (0F 28, no prefix) or movss (F3 0F 10)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          if (!UseXmmRegToRegMoveAll)
            emit_opcode(*cbuf, 0xF3);
          if (Matcher::_regEncode[dst_first] < 8) {
            if (Matcher::_regEncode[src_first] >= 8) {
              emit_opcode(*cbuf, Assembler::REX_B);
            }
          } else {
            if (Matcher::_regEncode[src_first] < 8) {
              emit_opcode(*cbuf, Assembler::REX_R);
            } else {
              emit_opcode(*cbuf, Assembler::REX_RB);
            }
          }
          emit_opcode(*cbuf, 0x0F);
          emit_opcode(*cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10);
          emit_rm(*cbuf, 0x3,
                  Matcher::_regEncode[dst_first] & 7,
                  Matcher::_regEncode[src_first] & 7);
#ifndef PRODUCT
        } else if (!do_size) {
          st->print("%s %s, %s\t# spill",
                    UseXmmRegToRegMoveAll ? "movaps" : "movss ",
                    Matcher::regName[dst_first],
                    Matcher::regName[src_first]);
#endif
        }
        return
          (Matcher::_regEncode[src_first] < 8 && Matcher::_regEncode[dst_first] < 8)
          ? (UseXmmRegToRegMoveAll ? 3 : 4)
          : (UseXmmRegToRegMoveAll ? 4 : 5); // REX
      }
    }
  }

  // Unhandled class combination — should never be reached.
  assert(0," foo ");
  Unimplemented();

  return 0;
}

#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const
{
  implementation(NULL, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
{
  implementation(&cbuf, ra_, false, NULL);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const
{
  return implementation(NULL, ra_, true, NULL);
}

//=============================================================================
#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
{
  st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif

// Emit _count bytes of padding nops.
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
{
  MacroAssembler _masm(&cbuf);
  __ nop(_count);
}

uint MachNopNode::size(PhaseRegAlloc*) const
{
  return _count;
}


//=============================================================================
#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("leaq %s, [rsp + #%d]\t# box lock",
            Matcher::regName[reg], offset);
}
#endif

// Materialize the address of an on-stack lock box:
// leaq reg, [rsp + offset], using the disp8 or disp32 form as needed.
void BoxLockNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);
  if (offset >= 0x80) {
    emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
    emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
    emit_rm(cbuf, 0x2, reg & 7, 0x04);
    emit_rm(cbuf, 0x0, 0x04, RSP_enc);
    emit_d32(cbuf, offset);
  } else {
    emit_opcode(cbuf, reg < 8 ? Assembler::REX_W : Assembler::REX_WR);
    emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
    emit_rm(cbuf, 0x1, reg & 7, 0x04);
    emit_rm(cbuf, 0x0, 0x04, RSP_enc);
    emit_d8(cbuf, offset);
  }
}

// Must agree byte-for-byte with emit() above (lea is 5 or 8 bytes).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const
{
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  return (offset < 0x80) ? 5 : 8; // REX
}

//=============================================================================

// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer& cbuf)
{
  // Stub is fixed up when the corresponding call is converted from
  // calling compiled code to calling interpreted code.
  // movq rbx, 0
  // jmp -5 # to self

  address mark = cbuf.insts_mark(); // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return; // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
  // static stub relocation also tags the methodOop in the code-stream.
  __ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
  // This is recognized as unresolved by relocs/nativeinst/ic code
  __ jump(RuntimeAddress(__ pc()));

  // Update current stubs pointer and restore insts_end.
  __ end_a_stub();
}

// size of call stub, compiled java to interpretor
uint size_java_to_interp()
{
  return 15; // movq (1+1+8); jmp (1+4)
}

// relocation entries for call stub, compiled java to interpretor
uint reloc_java_to_interp()
{
  return 4; // 3 in emit_java_to_interp + 1 in Java_Static_Call
}

//=============================================================================
#ifndef PRODUCT
// Pretty-print the unverified entry point (inline-cache check);
// mirrors MachUEPNode::emit() below.
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
  if (UseCompressedOops) {
    st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
    if (Universe::narrow_oop_shift() != 0) {
      st->print_cr("\tdecode_heap_oop_not_null rscratch1, rscratch1");
    }
    st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
  } else {
    st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
                 "# Inline cache check");
  }
  st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
  st->print_cr("\tnop\t# nops to align entry point");
}
#endif

// Emit the unverified entry point: compare the receiver's klass
// (j_rarg0) with the cached klass in rax and jump to the IC-miss stub
// on mismatch, then pad so the verified entry point is 4-byte aligned.
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
  MacroAssembler masm(&cbuf);
  uint insts_size = cbuf.insts_size();
  if (UseCompressedOops) {
    masm.load_klass(rscratch1, j_rarg0);
    masm.cmpptr(rax, rscratch1);
  } else {
    masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
  }

  masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  /* WARNING these NOPs are critical so that verified entry point is properly
     4 bytes aligned for patching by NativeJump::patch_verified_entry() */
  int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3);
  if (OptoBreakpoint) {
    // Leave space for int3
    nops_cnt -= 1;
  }
  nops_cnt &= 0x3; // Do not add nops if code is aligned.
  if (nops_cnt > 0)
    masm.nop(nops_cnt);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}


//=============================================================================
uint size_exception_handler()
{
  // NativeCall instruction size is the same as NativeJump.
  // Note that this value is also credited (in output.cpp) to
  // the size of the code section.
  return NativeJump::instruction_size;
}

// Emit exception handler code: a single jump to the exception blob.
// Returns the handler's offset within the stub section.
int emit_exception_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}

uint size_deopt_handler()
{
  // three 5 byte instructions
  return 15;
}

// Emit deopt handler code: push the handler's own pc (without clobbering
// any register) and jump to the deopt blob's unpack entry.
// Returns the handler's offset within the stub section.
int emit_deopt_handler(CodeBuffer& cbuf)
{

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  address the_pc = (address) __ pc();
  Label next;
  // push a "the_pc" on the stack without destroying any registers
  // as they all may be live.

  // push address of "next"
  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
  __ bind(next);
  // adjust it so it matches "the_pc"
  __ subptr(Address(rsp, 0), __ offset() - offset);
  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}


const bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode))
    return false;

  return true; // Per default match rules are supported.
}

int Matcher::regnum_to_fpu_offset(int regnum)
{
  return regnum - 32; // The FP registers are in the second chunk
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}

// Vector width in bytes
const uint Matcher::vector_width_in_bytes(void) {
  return 8;
}

// Vector ideal reg
const uint Matcher::vector_ideal_reg(void) {
  return Op_RegD;
}

// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int offset) {
  // the short version of jmpConUCF2 contains multiple branches,
  // making the reach slightly less
  if (rule == jmpConUCF2_rule)
    return (-126 <= offset && offset <= 125);
  return (-128 <= offset && offset <= 127);
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  //return value == (int) value; // Cf. storeImmL and immL32.

  // Probably always true, even if a temp register is required.
  return true;
}

// The ecx parameter to rep stosq for the ClearArray node is in words.
const bool Matcher::init_array_count_is_in_bytes = false;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = true;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

bool Matcher::narrow_oop_use_complex_address() {
  assert(UseCompressedOops, "only for compressed oops code");
  return (LogMinObjAlignmentInBytes <= 3);
}

// Is it better to copy float constants, or load them directly from
// memory? Intel can load a float constant from a direct address,
// requiring no extra registers. Most RISCs will have to materialize
// an address into a register first, so they would do better to copy
// the constant from stack.
const bool Matcher::rematerialize_float_constants = true; // XXX

// If CPU can load and store mis-aligned doubles directly then no
// fixup is needed. Else we split the double into 2 integer pieces
// and move it piece-by-piece. Only happens when passing doubles into
// C code as the Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = true;

// No-op on amd64
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {}

// Advertise here if the CPU requires explicit rounding operations to
2023 const bool Matcher::strict_fp_requires_explicit_rounding = true; 2024 2025 // Are floats converted to double when stored to stack during deoptimization? 2026 // On x64 it is stored without conversion so we can use normal access. 2027 bool Matcher::float_in_double() { return false; } 2028 2029 // Do ints take an entire long register or just half? 2030 const bool Matcher::int_in_long = true; 2031 2032 // Return whether or not this register is ever used as an argument. 2033 // This function is used on startup to build the trampoline stubs in 2034 // generateOptoStub. Registers not mentioned will be killed by the VM 2035 // call in the trampoline, and arguments in those registers will not be 2036 // available to the callee. 2037 bool Matcher::can_be_java_arg(int reg) 2038 { 2039 return 2040 reg == RDI_num || reg == RDI_H_num || 2041 reg == RSI_num || reg == RSI_H_num || 2042 reg == RDX_num || reg == RDX_H_num || 2043 reg == RCX_num || reg == RCX_H_num || 2044 reg == R8_num || reg == R8_H_num || 2045 reg == R9_num || reg == R9_H_num || 2046 reg == R12_num || reg == R12_H_num || 2047 reg == XMM0_num || reg == XMM0_H_num || 2048 reg == XMM1_num || reg == XMM1_H_num || 2049 reg == XMM2_num || reg == XMM2_H_num || 2050 reg == XMM3_num || reg == XMM3_H_num || 2051 reg == XMM4_num || reg == XMM4_H_num || 2052 reg == XMM5_num || reg == XMM5_H_num || 2053 reg == XMM6_num || reg == XMM6_H_num || 2054 reg == XMM7_num || reg == XMM7_H_num; 2055 } 2056 2057 bool Matcher::is_spillable_arg(int reg) 2058 { 2059 return can_be_java_arg(reg); 2060 } 2061 2062 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) { 2063 // In 64 bit mode a code which uses multiply when 2064 // divisor is constant is faster than hardware 2065 // DIV instruction (it uses MulHiL). 
2066 return false; 2067 } 2068 2069 // Register for DIVI projection of divmodI 2070 RegMask Matcher::divI_proj_mask() { 2071 return INT_RAX_REG_mask; 2072 } 2073 2074 // Register for MODI projection of divmodI 2075 RegMask Matcher::modI_proj_mask() { 2076 return INT_RDX_REG_mask; 2077 } 2078 2079 // Register for DIVL projection of divmodL 2080 RegMask Matcher::divL_proj_mask() { 2081 return LONG_RAX_REG_mask; 2082 } 2083 2084 // Register for MODL projection of divmodL 2085 RegMask Matcher::modL_proj_mask() { 2086 return LONG_RDX_REG_mask; 2087 } 2088 2089 const RegMask Matcher::method_handle_invoke_SP_save_mask() { 2090 return PTR_RBP_REG_mask; 2091 } 2092 2093 static Address build_address(int b, int i, int s, int d) { 2094 Register index = as_Register(i); 2095 Address::ScaleFactor scale = (Address::ScaleFactor)s; 2096 if (index == rsp) { 2097 index = noreg; 2098 scale = Address::no_scale; 2099 } 2100 Address addr(as_Register(b), index, scale, d); 2101 return addr; 2102 } 2103 2104 %} 2105 2106 //----------ENCODING BLOCK----------------------------------------------------- 2107 // This block specifies the encoding classes used by the compiler to 2108 // output byte streams. Encoding classes are parameterized macros 2109 // used by Machine Instruction Nodes in order to generate the bit 2110 // encoding of the instruction. Operands specify their base encoding 2111 // interface with the interface keyword. There are currently 2112 // supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, & 2113 // COND_INTER. REG_INTER causes an operand to generate a function 2114 // which returns its register number when queried. CONST_INTER causes 2115 // an operand to generate a function which returns the value of the 2116 // constant when queried. MEMORY_INTER causes an operand to generate 2117 // four functions which return the Base Register, the Index Register, 2118 // the Scale Value, and the Offset Value of the operand when queried. 
2119 // COND_INTER causes an operand to generate six functions which return 2120 // the encoding code (ie - encoding bits for the instruction) 2121 // associated with each basic boolean condition for a conditional 2122 // instruction. 2123 // 2124 // Instructions specify two basic values for encoding. Again, a 2125 // function is available to check if the constant displacement is an 2126 // oop. They use the ins_encode keyword to specify their encoding 2127 // classes (which must be a sequence of enc_class names, and their 2128 // parameters, specified in the encoding block), and they use the 2129 // opcode keyword to specify, in order, their primary, secondary, and 2130 // tertiary opcode. Only the opcode sections which a particular 2131 // instruction needs for encoding need to be specified. 2132 encode %{ 2133 // Build emit functions for each basic byte or larger field in the 2134 // intel encoding scheme (opcode, rm, sib, immediate), and call them 2135 // from C++ code in the enc_class source block. Emit functions will 2136 // live in the main source block for now. 
In future, we can 2137 // generalize this by adding a syntax that specifies the sizes of 2138 // fields in an order, so that the adlc can build the emit functions 2139 // automagically 2140 2141 // Emit primary opcode 2142 enc_class OpcP 2143 %{ 2144 emit_opcode(cbuf, $primary); 2145 %} 2146 2147 // Emit secondary opcode 2148 enc_class OpcS 2149 %{ 2150 emit_opcode(cbuf, $secondary); 2151 %} 2152 2153 // Emit tertiary opcode 2154 enc_class OpcT 2155 %{ 2156 emit_opcode(cbuf, $tertiary); 2157 %} 2158 2159 // Emit opcode directly 2160 enc_class Opcode(immI d8) 2161 %{ 2162 emit_opcode(cbuf, $d8$$constant); 2163 %} 2164 2165 // Emit size prefix 2166 enc_class SizePrefix 2167 %{ 2168 emit_opcode(cbuf, 0x66); 2169 %} 2170 2171 enc_class reg(rRegI reg) 2172 %{ 2173 emit_rm(cbuf, 0x3, 0, $reg$$reg & 7); 2174 %} 2175 2176 enc_class reg_reg(rRegI dst, rRegI src) 2177 %{ 2178 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7); 2179 %} 2180 2181 enc_class opc_reg_reg(immI opcode, rRegI dst, rRegI src) 2182 %{ 2183 emit_opcode(cbuf, $opcode$$constant); 2184 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7); 2185 %} 2186 2187 enc_class cmpfp_fixup() %{ 2188 MacroAssembler _masm(&cbuf); 2189 emit_cmpfp_fixup(_masm); 2190 %} 2191 2192 enc_class cmpfp3(rRegI dst) 2193 %{ 2194 int dstenc = $dst$$reg; 2195 2196 // movl $dst, -1 2197 if (dstenc >= 8) { 2198 emit_opcode(cbuf, Assembler::REX_B); 2199 } 2200 emit_opcode(cbuf, 0xB8 | (dstenc & 7)); 2201 emit_d32(cbuf, -1); 2202 2203 // jp,s done 2204 emit_opcode(cbuf, 0x7A); 2205 emit_d8(cbuf, dstenc < 4 ? 0x08 : 0x0A); 2206 2207 // jb,s done 2208 emit_opcode(cbuf, 0x72); 2209 emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08); 2210 2211 // setne $dst 2212 if (dstenc >= 4) { 2213 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B); 2214 } 2215 emit_opcode(cbuf, 0x0F); 2216 emit_opcode(cbuf, 0x95); 2217 emit_opcode(cbuf, 0xC0 | (dstenc & 7)); 2218 2219 // movzbl $dst, $dst 2220 if (dstenc >= 4) { 2221 emit_opcode(cbuf, dstenc < 8 ? 
Assembler::REX : Assembler::REX_RB); 2222 } 2223 emit_opcode(cbuf, 0x0F); 2224 emit_opcode(cbuf, 0xB6); 2225 emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7); 2226 %} 2227 2228 enc_class cdql_enc(no_rax_rdx_RegI div) 2229 %{ 2230 // Full implementation of Java idiv and irem; checks for 2231 // special case as described in JVM spec., p.243 & p.271. 2232 // 2233 // normal case special case 2234 // 2235 // input : rax: dividend min_int 2236 // reg: divisor -1 2237 // 2238 // output: rax: quotient (= rax idiv reg) min_int 2239 // rdx: remainder (= rax irem reg) 0 2240 // 2241 // Code sequence: 2242 // 2243 // 0: 3d 00 00 00 80 cmp $0x80000000,%eax 2244 // 5: 75 07/08 jne e <normal> 2245 // 7: 33 d2 xor %edx,%edx 2246 // [div >= 8 -> offset + 1] 2247 // [REX_B] 2248 // 9: 83 f9 ff cmp $0xffffffffffffffff,$div 2249 // c: 74 03/04 je 11 <done> 2250 // 000000000000000e <normal>: 2251 // e: 99 cltd 2252 // [div >= 8 -> offset + 1] 2253 // [REX_B] 2254 // f: f7 f9 idiv $div 2255 // 0000000000000011 <done>: 2256 2257 // cmp $0x80000000,%eax 2258 emit_opcode(cbuf, 0x3d); 2259 emit_d8(cbuf, 0x00); 2260 emit_d8(cbuf, 0x00); 2261 emit_d8(cbuf, 0x00); 2262 emit_d8(cbuf, 0x80); 2263 2264 // jne e <normal> 2265 emit_opcode(cbuf, 0x75); 2266 emit_d8(cbuf, $div$$reg < 8 ? 0x07 : 0x08); 2267 2268 // xor %edx,%edx 2269 emit_opcode(cbuf, 0x33); 2270 emit_d8(cbuf, 0xD2); 2271 2272 // cmp $0xffffffffffffffff,$div 2273 if ($div$$reg >= 8) { 2274 emit_opcode(cbuf, Assembler::REX_B); 2275 } 2276 emit_opcode(cbuf, 0x83); 2277 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7); 2278 emit_d8(cbuf, 0xFF); 2279 2280 // je 11 <done> 2281 emit_opcode(cbuf, 0x74); 2282 emit_d8(cbuf, $div$$reg < 8 ? 
0x03 : 0x04); 2283 2284 // <normal> 2285 // cltd 2286 emit_opcode(cbuf, 0x99); 2287 2288 // idivl (note: must be emitted by the user of this rule) 2289 // <done> 2290 %} 2291 2292 enc_class cdqq_enc(no_rax_rdx_RegL div) 2293 %{ 2294 // Full implementation of Java ldiv and lrem; checks for 2295 // special case as described in JVM spec., p.243 & p.271. 2296 // 2297 // normal case special case 2298 // 2299 // input : rax: dividend min_long 2300 // reg: divisor -1 2301 // 2302 // output: rax: quotient (= rax idiv reg) min_long 2303 // rdx: remainder (= rax irem reg) 0 2304 // 2305 // Code sequence: 2306 // 2307 // 0: 48 ba 00 00 00 00 00 mov $0x8000000000000000,%rdx 2308 // 7: 00 00 80 2309 // a: 48 39 d0 cmp %rdx,%rax 2310 // d: 75 08 jne 17 <normal> 2311 // f: 33 d2 xor %edx,%edx 2312 // 11: 48 83 f9 ff cmp $0xffffffffffffffff,$div 2313 // 15: 74 05 je 1c <done> 2314 // 0000000000000017 <normal>: 2315 // 17: 48 99 cqto 2316 // 19: 48 f7 f9 idiv $div 2317 // 000000000000001c <done>: 2318 2319 // mov $0x8000000000000000,%rdx 2320 emit_opcode(cbuf, Assembler::REX_W); 2321 emit_opcode(cbuf, 0xBA); 2322 emit_d8(cbuf, 0x00); 2323 emit_d8(cbuf, 0x00); 2324 emit_d8(cbuf, 0x00); 2325 emit_d8(cbuf, 0x00); 2326 emit_d8(cbuf, 0x00); 2327 emit_d8(cbuf, 0x00); 2328 emit_d8(cbuf, 0x00); 2329 emit_d8(cbuf, 0x80); 2330 2331 // cmp %rdx,%rax 2332 emit_opcode(cbuf, Assembler::REX_W); 2333 emit_opcode(cbuf, 0x39); 2334 emit_d8(cbuf, 0xD0); 2335 2336 // jne 17 <normal> 2337 emit_opcode(cbuf, 0x75); 2338 emit_d8(cbuf, 0x08); 2339 2340 // xor %edx,%edx 2341 emit_opcode(cbuf, 0x33); 2342 emit_d8(cbuf, 0xD2); 2343 2344 // cmp $0xffffffffffffffff,$div 2345 emit_opcode(cbuf, $div$$reg < 8 ? 
Assembler::REX_W : Assembler::REX_WB); 2346 emit_opcode(cbuf, 0x83); 2347 emit_rm(cbuf, 0x3, 0x7, $div$$reg & 7); 2348 emit_d8(cbuf, 0xFF); 2349 2350 // je 1c <done> 2351 emit_opcode(cbuf, 0x74); 2352 emit_d8(cbuf, 0x05); 2353 2354 // <normal> 2355 // cqto 2356 emit_opcode(cbuf, Assembler::REX_W); 2357 emit_opcode(cbuf, 0x99); 2358 2359 // idivq (note: must be emitted by the user of this rule) 2360 // <done> 2361 %} 2362 2363 // Opcode enc_class for 8/32 bit immediate instructions with sign-extension 2364 enc_class OpcSE(immI imm) 2365 %{ 2366 // Emit primary opcode and set sign-extend bit 2367 // Check for 8-bit immediate, and set sign extend bit in opcode 2368 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) { 2369 emit_opcode(cbuf, $primary | 0x02); 2370 } else { 2371 // 32-bit immediate 2372 emit_opcode(cbuf, $primary); 2373 } 2374 %} 2375 2376 enc_class OpcSErm(rRegI dst, immI imm) 2377 %{ 2378 // OpcSEr/m 2379 int dstenc = $dst$$reg; 2380 if (dstenc >= 8) { 2381 emit_opcode(cbuf, Assembler::REX_B); 2382 dstenc -= 8; 2383 } 2384 // Emit primary opcode and set sign-extend bit 2385 // Check for 8-bit immediate, and set sign extend bit in opcode 2386 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) { 2387 emit_opcode(cbuf, $primary | 0x02); 2388 } else { 2389 // 32-bit immediate 2390 emit_opcode(cbuf, $primary); 2391 } 2392 // Emit r/m byte with secondary opcode, after primary opcode. 
2393 emit_rm(cbuf, 0x3, $secondary, dstenc); 2394 %} 2395 2396 enc_class OpcSErm_wide(rRegL dst, immI imm) 2397 %{ 2398 // OpcSEr/m 2399 int dstenc = $dst$$reg; 2400 if (dstenc < 8) { 2401 emit_opcode(cbuf, Assembler::REX_W); 2402 } else { 2403 emit_opcode(cbuf, Assembler::REX_WB); 2404 dstenc -= 8; 2405 } 2406 // Emit primary opcode and set sign-extend bit 2407 // Check for 8-bit immediate, and set sign extend bit in opcode 2408 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) { 2409 emit_opcode(cbuf, $primary | 0x02); 2410 } else { 2411 // 32-bit immediate 2412 emit_opcode(cbuf, $primary); 2413 } 2414 // Emit r/m byte with secondary opcode, after primary opcode. 2415 emit_rm(cbuf, 0x3, $secondary, dstenc); 2416 %} 2417 2418 enc_class Con8or32(immI imm) 2419 %{ 2420 // Check for 8-bit immediate, and set sign extend bit in opcode 2421 if (-0x80 <= $imm$$constant && $imm$$constant < 0x80) { 2422 $$$emit8$imm$$constant; 2423 } else { 2424 // 32-bit immediate 2425 $$$emit32$imm$$constant; 2426 } 2427 %} 2428 2429 enc_class Lbl(label labl) 2430 %{ 2431 // GOTO 2432 Label* l = $labl$$label; 2433 emit_d32(cbuf, (l->loc_pos() - (cbuf.insts_size() + 4))); 2434 %} 2435 2436 enc_class LblShort(label labl) 2437 %{ 2438 // GOTO 2439 Label* l = $labl$$label; 2440 int disp = l->loc_pos() - (cbuf.insts_size() + 1); 2441 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 2442 emit_d8(cbuf, disp); 2443 %} 2444 2445 enc_class opc2_reg(rRegI dst) 2446 %{ 2447 // BSWAP 2448 emit_cc(cbuf, $secondary, $dst$$reg); 2449 %} 2450 2451 enc_class opc3_reg(rRegI dst) 2452 %{ 2453 // BSWAP 2454 emit_cc(cbuf, $tertiary, $dst$$reg); 2455 %} 2456 2457 enc_class reg_opc(rRegI div) 2458 %{ 2459 // INC, DEC, IDIV, IMOD, JMP indirect, ... 
2460 emit_rm(cbuf, 0x3, $secondary, $div$$reg & 7); 2461 %} 2462 2463 enc_class Jcc(cmpOp cop, label labl) 2464 %{ 2465 // JCC 2466 Label* l = $labl$$label; 2467 $$$emit8$primary; 2468 emit_cc(cbuf, $secondary, $cop$$cmpcode); 2469 emit_d32(cbuf, (l->loc_pos() - (cbuf.insts_size() + 4))); 2470 %} 2471 2472 enc_class JccShort (cmpOp cop, label labl) 2473 %{ 2474 // JCC 2475 Label *l = $labl$$label; 2476 emit_cc(cbuf, $primary, $cop$$cmpcode); 2477 int disp = l->loc_pos() - (cbuf.insts_size() + 1); 2478 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 2479 emit_d8(cbuf, disp); 2480 %} 2481 2482 enc_class enc_cmov(cmpOp cop) 2483 %{ 2484 // CMOV 2485 $$$emit8$primary; 2486 emit_cc(cbuf, $secondary, $cop$$cmpcode); 2487 %} 2488 2489 enc_class enc_cmovf_branch(cmpOp cop, regF dst, regF src) 2490 %{ 2491 // Invert sense of branch from sense of cmov 2492 emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1); 2493 emit_d8(cbuf, ($dst$$reg < 8 && $src$$reg < 8) 2494 ? (UseXmmRegToRegMoveAll ? 3 : 4) 2495 : (UseXmmRegToRegMoveAll ? 4 : 5) ); // REX 2496 // UseXmmRegToRegMoveAll ? movaps(dst, src) : movss(dst, src) 2497 if (!UseXmmRegToRegMoveAll) emit_opcode(cbuf, 0xF3); 2498 if ($dst$$reg < 8) { 2499 if ($src$$reg >= 8) { 2500 emit_opcode(cbuf, Assembler::REX_B); 2501 } 2502 } else { 2503 if ($src$$reg < 8) { 2504 emit_opcode(cbuf, Assembler::REX_R); 2505 } else { 2506 emit_opcode(cbuf, Assembler::REX_RB); 2507 } 2508 } 2509 emit_opcode(cbuf, 0x0F); 2510 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10); 2511 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7); 2512 %} 2513 2514 enc_class enc_cmovd_branch(cmpOp cop, regD dst, regD src) 2515 %{ 2516 // Invert sense of branch from sense of cmov 2517 emit_cc(cbuf, 0x70, $cop$$cmpcode ^ 1); 2518 emit_d8(cbuf, $dst$$reg < 8 && $src$$reg < 8 ? 4 : 5); // REX 2519 2520 // UseXmmRegToRegMoveAll ? movapd(dst, src) : movsd(dst, src) 2521 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 
0x66 : 0xF2); 2522 if ($dst$$reg < 8) { 2523 if ($src$$reg >= 8) { 2524 emit_opcode(cbuf, Assembler::REX_B); 2525 } 2526 } else { 2527 if ($src$$reg < 8) { 2528 emit_opcode(cbuf, Assembler::REX_R); 2529 } else { 2530 emit_opcode(cbuf, Assembler::REX_RB); 2531 } 2532 } 2533 emit_opcode(cbuf, 0x0F); 2534 emit_opcode(cbuf, UseXmmRegToRegMoveAll ? 0x28 : 0x10); 2535 emit_rm(cbuf, 0x3, $dst$$reg & 7, $src$$reg & 7); 2536 %} 2537 2538 enc_class enc_PartialSubtypeCheck() 2539 %{ 2540 Register Rrdi = as_Register(RDI_enc); // result register 2541 Register Rrax = as_Register(RAX_enc); // super class 2542 Register Rrcx = as_Register(RCX_enc); // killed 2543 Register Rrsi = as_Register(RSI_enc); // sub class 2544 Label miss; 2545 const bool set_cond_codes = true; 2546 2547 MacroAssembler _masm(&cbuf); 2548 __ check_klass_subtype_slow_path(Rrsi, Rrax, Rrcx, Rrdi, 2549 NULL, &miss, 2550 /*set_cond_codes:*/ true); 2551 if ($primary) { 2552 __ xorptr(Rrdi, Rrdi); 2553 } 2554 __ bind(miss); 2555 %} 2556 2557 enc_class Java_To_Interpreter(method meth) 2558 %{ 2559 // CALL Java_To_Interpreter 2560 // This is the instruction starting address for relocation info. 2561 cbuf.set_insts_mark(); 2562 $$$emit8$primary; 2563 // CALL directly to the runtime 2564 emit_d32_reloc(cbuf, 2565 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2566 runtime_call_Relocation::spec(), 2567 RELOC_DISP32); 2568 %} 2569 2570 enc_class preserve_SP %{ 2571 debug_only(int off0 = cbuf.insts_size()); 2572 MacroAssembler _masm(&cbuf); 2573 // RBP is preserved across all calls, even compiled calls. 2574 // Use it to preserve RSP in places where the callee might change the SP. 
2575 __ movptr(rbp_mh_SP_save, rsp); 2576 debug_only(int off1 = cbuf.insts_size()); 2577 assert(off1 - off0 == preserve_SP_size(), "correct size prediction"); 2578 %} 2579 2580 enc_class restore_SP %{ 2581 MacroAssembler _masm(&cbuf); 2582 __ movptr(rsp, rbp_mh_SP_save); 2583 %} 2584 2585 enc_class Java_Static_Call(method meth) 2586 %{ 2587 // JAVA STATIC CALL 2588 // CALL to fixup routine. Fixup routine uses ScopeDesc info to 2589 // determine who we intended to call. 2590 cbuf.set_insts_mark(); 2591 $$$emit8$primary; 2592 2593 if (!_method) { 2594 emit_d32_reloc(cbuf, 2595 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2596 runtime_call_Relocation::spec(), 2597 RELOC_DISP32); 2598 } else if (_optimized_virtual) { 2599 emit_d32_reloc(cbuf, 2600 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2601 opt_virtual_call_Relocation::spec(), 2602 RELOC_DISP32); 2603 } else { 2604 emit_d32_reloc(cbuf, 2605 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2606 static_call_Relocation::spec(), 2607 RELOC_DISP32); 2608 } 2609 if (_method) { 2610 // Emit stub for static call 2611 emit_java_to_interp(cbuf); 2612 } 2613 %} 2614 2615 enc_class Java_Dynamic_Call(method meth) 2616 %{ 2617 // JAVA DYNAMIC CALL 2618 // !!!!! 2619 // Generate "movq rax, -1", placeholder instruction to load oop-info 2620 // emit_call_dynamic_prologue( cbuf ); 2621 cbuf.set_insts_mark(); 2622 2623 // movq rax, -1 2624 emit_opcode(cbuf, Assembler::REX_W); 2625 emit_opcode(cbuf, 0xB8 | RAX_enc); 2626 emit_d64_reloc(cbuf, 2627 (int64_t) Universe::non_oop_word(), 2628 oop_Relocation::spec_for_immediate(), RELOC_IMM64); 2629 address virtual_call_oop_addr = cbuf.insts_mark(); 2630 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 2631 // who we intended to call. 
2632 cbuf.set_insts_mark(); 2633 $$$emit8$primary; 2634 emit_d32_reloc(cbuf, 2635 (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 2636 virtual_call_Relocation::spec(virtual_call_oop_addr), 2637 RELOC_DISP32); 2638 %} 2639 2640 enc_class Java_Compiled_Call(method meth) 2641 %{ 2642 // JAVA COMPILED CALL 2643 int disp = in_bytes(methodOopDesc:: from_compiled_offset()); 2644 2645 // XXX XXX offset is 128 is 1.5 NON-PRODUCT !!! 2646 // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small"); 2647 2648 // callq *disp(%rax) 2649 cbuf.set_insts_mark(); 2650 $$$emit8$primary; 2651 if (disp < 0x80) { 2652 emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte 2653 emit_d8(cbuf, disp); // Displacement 2654 } else { 2655 emit_rm(cbuf, 0x02, $secondary, RAX_enc); // R/M byte 2656 emit_d32(cbuf, disp); // Displacement 2657 } 2658 %} 2659 2660 enc_class reg_opc_imm(rRegI dst, immI8 shift) 2661 %{ 2662 // SAL, SAR, SHR 2663 int dstenc = $dst$$reg; 2664 if (dstenc >= 8) { 2665 emit_opcode(cbuf, Assembler::REX_B); 2666 dstenc -= 8; 2667 } 2668 $$$emit8$primary; 2669 emit_rm(cbuf, 0x3, $secondary, dstenc); 2670 $$$emit8$shift$$constant; 2671 %} 2672 2673 enc_class reg_opc_imm_wide(rRegL dst, immI8 shift) 2674 %{ 2675 // SAL, SAR, SHR 2676 int dstenc = $dst$$reg; 2677 if (dstenc < 8) { 2678 emit_opcode(cbuf, Assembler::REX_W); 2679 } else { 2680 emit_opcode(cbuf, Assembler::REX_WB); 2681 dstenc -= 8; 2682 } 2683 $$$emit8$primary; 2684 emit_rm(cbuf, 0x3, $secondary, dstenc); 2685 $$$emit8$shift$$constant; 2686 %} 2687 2688 enc_class load_immI(rRegI dst, immI src) 2689 %{ 2690 int dstenc = $dst$$reg; 2691 if (dstenc >= 8) { 2692 emit_opcode(cbuf, Assembler::REX_B); 2693 dstenc -= 8; 2694 } 2695 emit_opcode(cbuf, 0xB8 | dstenc); 2696 $$$emit32$src$$constant; 2697 %} 2698 2699 enc_class load_immL(rRegL dst, immL src) 2700 %{ 2701 int dstenc = $dst$$reg; 2702 if (dstenc < 8) { 2703 emit_opcode(cbuf, Assembler::REX_W); 2704 } else { 2705 emit_opcode(cbuf, 
Assembler::REX_WB); 2706 dstenc -= 8; 2707 } 2708 emit_opcode(cbuf, 0xB8 | dstenc); 2709 emit_d64(cbuf, $src$$constant); 2710 %} 2711 2712 enc_class load_immUL32(rRegL dst, immUL32 src) 2713 %{ 2714 // same as load_immI, but this time we care about zeroes in the high word 2715 int dstenc = $dst$$reg; 2716 if (dstenc >= 8) { 2717 emit_opcode(cbuf, Assembler::REX_B); 2718 dstenc -= 8; 2719 } 2720 emit_opcode(cbuf, 0xB8 | dstenc); 2721 $$$emit32$src$$constant; 2722 %} 2723 2724 enc_class load_immL32(rRegL dst, immL32 src) 2725 %{ 2726 int dstenc = $dst$$reg; 2727 if (dstenc < 8) { 2728 emit_opcode(cbuf, Assembler::REX_W); 2729 } else { 2730 emit_opcode(cbuf, Assembler::REX_WB); 2731 dstenc -= 8; 2732 } 2733 emit_opcode(cbuf, 0xC7); 2734 emit_rm(cbuf, 0x03, 0x00, dstenc); 2735 $$$emit32$src$$constant; 2736 %} 2737 2738 enc_class load_immP31(rRegP dst, immP32 src) 2739 %{ 2740 // same as load_immI, but this time we care about zeroes in the high word 2741 int dstenc = $dst$$reg; 2742 if (dstenc >= 8) { 2743 emit_opcode(cbuf, Assembler::REX_B); 2744 dstenc -= 8; 2745 } 2746 emit_opcode(cbuf, 0xB8 | dstenc); 2747 $$$emit32$src$$constant; 2748 %} 2749 2750 enc_class load_immP(rRegP dst, immP src) 2751 %{ 2752 int dstenc = $dst$$reg; 2753 if (dstenc < 8) { 2754 emit_opcode(cbuf, Assembler::REX_W); 2755 } else { 2756 emit_opcode(cbuf, Assembler::REX_WB); 2757 dstenc -= 8; 2758 } 2759 emit_opcode(cbuf, 0xB8 | dstenc); 2760 // This next line should be generated from ADLC 2761 if ($src->constant_is_oop()) { 2762 emit_d64_reloc(cbuf, $src$$constant, relocInfo::oop_type, RELOC_IMM64); 2763 } else { 2764 emit_d64(cbuf, $src$$constant); 2765 } 2766 %} 2767 2768 // Encode a reg-reg copy. If it is useless, then empty encoding. 2769 enc_class enc_copy(rRegI dst, rRegI src) 2770 %{ 2771 encode_copy(cbuf, $dst$$reg, $src$$reg); 2772 %} 2773 2774 // Encode xmm reg-reg copy. If it is useless, then empty encoding. 
2775 enc_class enc_CopyXD( RegD dst, RegD src ) %{ 2776 encode_CopyXD( cbuf, $dst$$reg, $src$$reg ); 2777 %} 2778 2779 enc_class enc_copy_always(rRegI dst, rRegI src) 2780 %{ 2781 int srcenc = $src$$reg; 2782 int dstenc = $dst$$reg; 2783 2784 if (dstenc < 8) { 2785 if (srcenc >= 8) { 2786 emit_opcode(cbuf, Assembler::REX_B); 2787 srcenc -= 8; 2788 } 2789 } else { 2790 if (srcenc < 8) { 2791 emit_opcode(cbuf, Assembler::REX_R); 2792 } else { 2793 emit_opcode(cbuf, Assembler::REX_RB); 2794 srcenc -= 8; 2795 } 2796 dstenc -= 8; 2797 } 2798 2799 emit_opcode(cbuf, 0x8B); 2800 emit_rm(cbuf, 0x3, dstenc, srcenc); 2801 %} 2802 2803 enc_class enc_copy_wide(rRegL dst, rRegL src) 2804 %{ 2805 int srcenc = $src$$reg; 2806 int dstenc = $dst$$reg; 2807 2808 if (dstenc != srcenc) { 2809 if (dstenc < 8) { 2810 if (srcenc < 8) { 2811 emit_opcode(cbuf, Assembler::REX_W); 2812 } else { 2813 emit_opcode(cbuf, Assembler::REX_WB); 2814 srcenc -= 8; 2815 } 2816 } else { 2817 if (srcenc < 8) { 2818 emit_opcode(cbuf, Assembler::REX_WR); 2819 } else { 2820 emit_opcode(cbuf, Assembler::REX_WRB); 2821 srcenc -= 8; 2822 } 2823 dstenc -= 8; 2824 } 2825 emit_opcode(cbuf, 0x8B); 2826 emit_rm(cbuf, 0x3, dstenc, srcenc); 2827 } 2828 %} 2829 2830 enc_class Con32(immI src) 2831 %{ 2832 // Output immediate 2833 $$$emit32$src$$constant; 2834 %} 2835 2836 enc_class Con64(immL src) 2837 %{ 2838 // Output immediate 2839 emit_d64($src$$constant); 2840 %} 2841 2842 enc_class Con32F_as_bits(immF src) 2843 %{ 2844 // Output Float immediate bits 2845 jfloat jf = $src$$constant; 2846 jint jf_as_bits = jint_cast(jf); 2847 emit_d32(cbuf, jf_as_bits); 2848 %} 2849 2850 enc_class Con16(immI src) 2851 %{ 2852 // Output immediate 2853 $$$emit16$src$$constant; 2854 %} 2855 2856 // How is this different from Con32??? 
XXX 2857 enc_class Con_d32(immI src) 2858 %{ 2859 emit_d32(cbuf,$src$$constant); 2860 %} 2861 2862 enc_class conmemref (rRegP t1) %{ // Con32(storeImmI) 2863 // Output immediate memory reference 2864 emit_rm(cbuf, 0x00, $t1$$reg, 0x05 ); 2865 emit_d32(cbuf, 0x00); 2866 %} 2867 2868 enc_class lock_prefix() 2869 %{ 2870 if (os::is_MP()) { 2871 emit_opcode(cbuf, 0xF0); // lock 2872 } 2873 %} 2874 2875 enc_class REX_mem(memory mem) 2876 %{ 2877 if ($mem$$base >= 8) { 2878 if ($mem$$index < 8) { 2879 emit_opcode(cbuf, Assembler::REX_B); 2880 } else { 2881 emit_opcode(cbuf, Assembler::REX_XB); 2882 } 2883 } else { 2884 if ($mem$$index >= 8) { 2885 emit_opcode(cbuf, Assembler::REX_X); 2886 } 2887 } 2888 %} 2889 2890 enc_class REX_mem_wide(memory mem) 2891 %{ 2892 if ($mem$$base >= 8) { 2893 if ($mem$$index < 8) { 2894 emit_opcode(cbuf, Assembler::REX_WB); 2895 } else { 2896 emit_opcode(cbuf, Assembler::REX_WXB); 2897 } 2898 } else { 2899 if ($mem$$index < 8) { 2900 emit_opcode(cbuf, Assembler::REX_W); 2901 } else { 2902 emit_opcode(cbuf, Assembler::REX_WX); 2903 } 2904 } 2905 %} 2906 2907 // for byte regs 2908 enc_class REX_breg(rRegI reg) 2909 %{ 2910 if ($reg$$reg >= 4) { 2911 emit_opcode(cbuf, $reg$$reg < 8 ? Assembler::REX : Assembler::REX_B); 2912 } 2913 %} 2914 2915 // for byte regs 2916 enc_class REX_reg_breg(rRegI dst, rRegI src) 2917 %{ 2918 if ($dst$$reg < 8) { 2919 if ($src$$reg >= 4) { 2920 emit_opcode(cbuf, $src$$reg < 8 ? 
Assembler::REX : Assembler::REX_B); 2921 } 2922 } else { 2923 if ($src$$reg < 8) { 2924 emit_opcode(cbuf, Assembler::REX_R); 2925 } else { 2926 emit_opcode(cbuf, Assembler::REX_RB); 2927 } 2928 } 2929 %} 2930 2931 // for byte regs 2932 enc_class REX_breg_mem(rRegI reg, memory mem) 2933 %{ 2934 if ($reg$$reg < 8) { 2935 if ($mem$$base < 8) { 2936 if ($mem$$index >= 8) { 2937 emit_opcode(cbuf, Assembler::REX_X); 2938 } else if ($reg$$reg >= 4) { 2939 emit_opcode(cbuf, Assembler::REX); 2940 } 2941 } else { 2942 if ($mem$$index < 8) { 2943 emit_opcode(cbuf, Assembler::REX_B); 2944 } else { 2945 emit_opcode(cbuf, Assembler::REX_XB); 2946 } 2947 } 2948 } else { 2949 if ($mem$$base < 8) { 2950 if ($mem$$index < 8) { 2951 emit_opcode(cbuf, Assembler::REX_R); 2952 } else { 2953 emit_opcode(cbuf, Assembler::REX_RX); 2954 } 2955 } else { 2956 if ($mem$$index < 8) { 2957 emit_opcode(cbuf, Assembler::REX_RB); 2958 } else { 2959 emit_opcode(cbuf, Assembler::REX_RXB); 2960 } 2961 } 2962 } 2963 %} 2964 2965 enc_class REX_reg(rRegI reg) 2966 %{ 2967 if ($reg$$reg >= 8) { 2968 emit_opcode(cbuf, Assembler::REX_B); 2969 } 2970 %} 2971 2972 enc_class REX_reg_wide(rRegI reg) 2973 %{ 2974 if ($reg$$reg < 8) { 2975 emit_opcode(cbuf, Assembler::REX_W); 2976 } else { 2977 emit_opcode(cbuf, Assembler::REX_WB); 2978 } 2979 %} 2980 2981 enc_class REX_reg_reg(rRegI dst, rRegI src) 2982 %{ 2983 if ($dst$$reg < 8) { 2984 if ($src$$reg >= 8) { 2985 emit_opcode(cbuf, Assembler::REX_B); 2986 } 2987 } else { 2988 if ($src$$reg < 8) { 2989 emit_opcode(cbuf, Assembler::REX_R); 2990 } else { 2991 emit_opcode(cbuf, Assembler::REX_RB); 2992 } 2993 } 2994 %} 2995 2996 enc_class REX_reg_reg_wide(rRegI dst, rRegI src) 2997 %{ 2998 if ($dst$$reg < 8) { 2999 if ($src$$reg < 8) { 3000 emit_opcode(cbuf, Assembler::REX_W); 3001 } else { 3002 emit_opcode(cbuf, Assembler::REX_WB); 3003 } 3004 } else { 3005 if ($src$$reg < 8) { 3006 emit_opcode(cbuf, Assembler::REX_WR); 3007 } else { 3008 emit_opcode(cbuf, 
Assembler::REX_WRB); 3009 } 3010 } 3011 %} 3012 3013 enc_class REX_reg_mem(rRegI reg, memory mem) 3014 %{ 3015 if ($reg$$reg < 8) { 3016 if ($mem$$base < 8) { 3017 if ($mem$$index >= 8) { 3018 emit_opcode(cbuf, Assembler::REX_X); 3019 } 3020 } else { 3021 if ($mem$$index < 8) { 3022 emit_opcode(cbuf, Assembler::REX_B); 3023 } else { 3024 emit_opcode(cbuf, Assembler::REX_XB); 3025 } 3026 } 3027 } else { 3028 if ($mem$$base < 8) { 3029 if ($mem$$index < 8) { 3030 emit_opcode(cbuf, Assembler::REX_R); 3031 } else { 3032 emit_opcode(cbuf, Assembler::REX_RX); 3033 } 3034 } else { 3035 if ($mem$$index < 8) { 3036 emit_opcode(cbuf, Assembler::REX_RB); 3037 } else { 3038 emit_opcode(cbuf, Assembler::REX_RXB); 3039 } 3040 } 3041 } 3042 %} 3043 3044 enc_class REX_reg_mem_wide(rRegL reg, memory mem) 3045 %{ 3046 if ($reg$$reg < 8) { 3047 if ($mem$$base < 8) { 3048 if ($mem$$index < 8) { 3049 emit_opcode(cbuf, Assembler::REX_W); 3050 } else { 3051 emit_opcode(cbuf, Assembler::REX_WX); 3052 } 3053 } else { 3054 if ($mem$$index < 8) { 3055 emit_opcode(cbuf, Assembler::REX_WB); 3056 } else { 3057 emit_opcode(cbuf, Assembler::REX_WXB); 3058 } 3059 } 3060 } else { 3061 if ($mem$$base < 8) { 3062 if ($mem$$index < 8) { 3063 emit_opcode(cbuf, Assembler::REX_WR); 3064 } else { 3065 emit_opcode(cbuf, Assembler::REX_WRX); 3066 } 3067 } else { 3068 if ($mem$$index < 8) { 3069 emit_opcode(cbuf, Assembler::REX_WRB); 3070 } else { 3071 emit_opcode(cbuf, Assembler::REX_WRXB); 3072 } 3073 } 3074 } 3075 %} 3076 3077 enc_class reg_mem(rRegI ereg, memory mem) 3078 %{ 3079 // High registers handle in encode_RegMem 3080 int reg = $ereg$$reg; 3081 int base = $mem$$base; 3082 int index = $mem$$index; 3083 int scale = $mem$$scale; 3084 int disp = $mem$$disp; 3085 bool disp_is_oop = $mem->disp_is_oop(); 3086 3087 encode_RegMem(cbuf, reg, base, index, scale, disp, disp_is_oop); 3088 %} 3089 3090 enc_class RM_opc_mem(immI rm_opcode, memory mem) 3091 %{ 3092 int rm_byte_opcode = $rm_opcode$$constant; 3093 
    // High registers handled in encode_RegMem
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int displace = $mem$$disp;

    bool disp_is_oop = $mem->disp_is_oop(); // disp-as-oop when
                                            // working with static
                                            // globals
    // The /r field of the ModRM byte carries the opcode extension
    // (rm_byte_opcode) instead of a register number here.
    encode_RegMem(cbuf, rm_byte_opcode, base, index, scale, displace,
                  disp_is_oop);
  %}

  // LEA-style encoding: dst <- src0 + src1, emitted as a reg/mem form with
  // no index (0x04 in the SIB index field) and no scale.
  enc_class reg_lea(rRegI dst, rRegI src0, immI src1)
  %{
    int reg_encoding = $dst$$reg;
    int base = $src0$$reg;      // 0xFFFFFFFF indicates no base
    int index = 0x04;           // 0x04 indicates no index
    int scale = 0x00;           // 0x00 indicates no scale
    int displace = $src1$$constant; // 0x00 indicates no displacement
    bool disp_is_oop = false;
    encode_RegMem(cbuf, reg_encoding, base, index, scale, displace,
                  disp_is_oop);
  %}

  // Two's-complement negate of a 32-bit register: F7 /3 (NEG r/m32),
  // with REX.B when dst is one of R8-R15.
  enc_class neg_reg(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    }
    // NEG $dst
    emit_opcode(cbuf, 0xF7);
    emit_rm(cbuf, 0x3, 0x03, dstenc);
  %}

  // 64-bit negate: same F7 /3 form but a REX.W (or REX.WB) prefix is
  // always required for the 64-bit operand size.
  enc_class neg_reg_wide(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc < 8) {
      emit_opcode(cbuf, Assembler::REX_W);
    } else {
      emit_opcode(cbuf, Assembler::REX_WB);
      dstenc -= 8;
    }
    // NEG $dst
    emit_opcode(cbuf, 0xF7);
    emit_rm(cbuf, 0x3, 0x03, dstenc);
  %}

  // SETL (set byte if less) into dst.  A bare REX prefix is needed for
  // encodings 4-7 so the byte register is SPL/BPL/SIL/DIL rather than
  // AH/CH/DH/BH.
  enc_class setLT_reg(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    } else if (dstenc >= 4) {
      emit_opcode(cbuf, Assembler::REX);
    }
    // SETLT $dst
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x9C);
    emit_rm(cbuf, 0x3, 0x0, dstenc);
  %}

  // SETNZ (set byte if not zero) into dst; same byte-register REX rules
  // as setLT_reg above.
  enc_class setNZ_reg(rRegI dst)
  %{
    int dstenc = $dst$$reg;
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
      dstenc -= 8;
    } else if (dstenc >= 4) {
      emit_opcode(cbuf, Assembler::REX);
    }
    // SETNZ $dst
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x95);
    emit_rm(cbuf, 0x3, 0x0, dstenc);
  %}


  // Compare the longs and set -1, 0, or 1 into dst
  // (CMP src1,src2; dst = -1; jl done; SETNE dst; MOVZBL dst,dst).
  enc_class cmpl3_flag(rRegL src1, rRegL src2, rRegI dst)
  %{
    int src1enc = $src1$$reg;
    int src2enc = $src2$$reg;
    int dstenc = $dst$$reg;

    // cmpq $src1, $src2
    if (src1enc < 8) {
      if (src2enc < 8) {
        emit_opcode(cbuf, Assembler::REX_W);
      } else {
        emit_opcode(cbuf, Assembler::REX_WB);
      }
    } else {
      if (src2enc < 8) {
        emit_opcode(cbuf, Assembler::REX_WR);
      } else {
        emit_opcode(cbuf, Assembler::REX_WRB);
      }
    }
    emit_opcode(cbuf, 0x3B);
    emit_rm(cbuf, 0x3, src1enc & 7, src2enc & 7);

    // movl $dst, -1
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_B);
    }
    emit_opcode(cbuf, 0xB8 | (dstenc & 7));
    emit_d32(cbuf, -1);

    // jl,s done
    // Short-jump displacement depends on whether the setne/movzbl below
    // need REX prefixes (dstenc >= 4 adds one byte to each).
    emit_opcode(cbuf, 0x7C);
    emit_d8(cbuf, dstenc < 4 ? 0x06 : 0x08);

    // setne $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_B);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x95);
    emit_opcode(cbuf, 0xC0 | (dstenc & 7));

    // movzbl $dst, $dst
    if (dstenc >= 4) {
      emit_opcode(cbuf, dstenc < 8 ? Assembler::REX : Assembler::REX_RB);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0xB6);
    emit_rm(cbuf, 0x3, dstenc & 7, dstenc & 7);
  %}

  // Pop an x87 double result (at FPU TOS) into an XMM register via a
  // stack slot, then release the 8-byte slot.
  enc_class Push_ResultXD(regD dst) %{
    int dstenc = $dst$$reg;

    store_to_stackslot( cbuf, 0xDD, 0x03, 0 ); //FSTP [RSP]

    // UseXmmLoadAndClearUpper ? movsd dst,[rsp] : movlpd dst,[rsp]
    emit_opcode  (cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
    if (dstenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode    (cbuf, 0x0F );
    emit_opcode    (cbuf, UseXmmLoadAndClearUpper ?
                            0x10 : 0x12 );
    encode_RegMem(cbuf, dstenc, RSP_enc, 0x4, 0, 0, false);

    // add rsp,8
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf,0x83);
    emit_rm(cbuf,0x3, 0x0, RSP_enc);
    emit_d8(cbuf,0x08);
  %}

  // Spill an XMM double to a fresh 8-byte stack slot and load it onto the
  // x87 FPU stack (for code that still needs the x87 unit).
  enc_class Push_SrcXD(regD src) %{
    int srcenc = $src$$reg;

    // subq rsp,#8
    emit_opcode(cbuf, Assembler::REX_W);
    emit_opcode(cbuf, 0x83);
    emit_rm(cbuf, 0x3, 0x5, RSP_enc);
    emit_d8(cbuf, 0x8);

    // movsd [rsp],src
    emit_opcode(cbuf, 0xF2);
    if (srcenc >= 8) {
      emit_opcode(cbuf, Assembler::REX_R);
    }
    emit_opcode(cbuf, 0x0F);
    emit_opcode(cbuf, 0x11);
    encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false);

    // fldd [rsp]
    // NOTE(review): emits 0x66 0xDD /0 — the 0x66 prefix before an x87
    // opcode looks intentional (matches the original) but is unusual; verify
    // against the matching pop in Push_ResultXD before changing.
    emit_opcode(cbuf, 0x66);
    emit_opcode(cbuf, 0xDD);
    encode_RegMem(cbuf, 0x0, RSP_enc, 0x4, 0, 0, false);
  %}


  // 64-bit XMM load via MacroAssembler (handles REX/prefix selection).
  enc_class movq_ld(regD dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    __ movq($dst$$XMMRegister, $mem$$Address);
  %}

  // 64-bit XMM store via MacroAssembler.
  enc_class movq_st(memory mem, regD src) %{
    MacroAssembler _masm(&cbuf);
    __ movq($mem$$Address, $src$$XMMRegister);
  %}

  // Replicate the low byte of src across the low 8 byte lanes of dst:
  // copy, punpcklbw with self, then pshuflw with imm 0x00.
  enc_class pshufd_8x8(regF dst, regF src) %{
    MacroAssembler _masm(&cbuf);

    encode_CopyXD(cbuf, $dst$$reg, $src$$reg);
    __ punpcklbw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg));
    __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($dst$$reg), 0x00);
  %}

  // Replicate the low 16-bit lane of src across the low 4 word lanes of dst.
  enc_class pshufd_4x16(regF dst, regF src) %{
    MacroAssembler _masm(&cbuf);

    __ pshuflw(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), 0x00);
  %}

  // General PSHUFD with a caller-supplied shuffle immediate.
  enc_class pshufd(regD dst, regD src, int mode) %{
    MacroAssembler _masm(&cbuf);

    __ pshufd(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg), $mode);
  %}

  // Bitwise XOR of two XMM registers (dst ^= src).
  enc_class pxor(regD dst, regD src) %{
    MacroAssembler _masm(&cbuf);

    __ pxor(as_XMMRegister($dst$$reg), as_XMMRegister($src$$reg));
  %}
3310 enc_class mov_i2x(regD dst, rRegI src) %{ 3311 MacroAssembler _masm(&cbuf); 3312 3313 __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg)); 3314 %} 3315 3316 // obj: object to lock 3317 // box: box address (header location) -- killed 3318 // tmp: rax -- killed 3319 // scr: rbx -- killed 3320 // 3321 // What follows is a direct transliteration of fast_lock() and fast_unlock() 3322 // from i486.ad. See that file for comments. 3323 // TODO: where possible switch from movq (r, 0) to movl(r,0) and 3324 // use the shorter encoding. (Movl clears the high-order 32-bits). 3325 3326 3327 enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr) 3328 %{ 3329 Register objReg = as_Register((int)$obj$$reg); 3330 Register boxReg = as_Register((int)$box$$reg); 3331 Register tmpReg = as_Register($tmp$$reg); 3332 Register scrReg = as_Register($scr$$reg); 3333 MacroAssembler masm(&cbuf); 3334 3335 // Verify uniqueness of register assignments -- necessary but not sufficient 3336 assert (objReg != boxReg && objReg != tmpReg && 3337 objReg != scrReg && tmpReg != scrReg, "invariant") ; 3338 3339 if (_counters != NULL) { 3340 masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr())); 3341 } 3342 if (EmitSync & 1) { 3343 // Without cast to int32_t a movptr will destroy r10 which is typically obj 3344 masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ; 3345 masm.cmpptr(rsp, (int32_t)NULL_WORD) ; 3346 } else 3347 if (EmitSync & 2) { 3348 Label DONE_LABEL; 3349 if (UseBiasedLocking) { 3350 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument. 3351 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters); 3352 } 3353 // QQQ was movl... 
3354 masm.movptr(tmpReg, 0x1); 3355 masm.orptr(tmpReg, Address(objReg, 0)); 3356 masm.movptr(Address(boxReg, 0), tmpReg); 3357 if (os::is_MP()) { 3358 masm.lock(); 3359 } 3360 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg 3361 masm.jcc(Assembler::equal, DONE_LABEL); 3362 3363 // Recursive locking 3364 masm.subptr(tmpReg, rsp); 3365 masm.andptr(tmpReg, 7 - os::vm_page_size()); 3366 masm.movptr(Address(boxReg, 0), tmpReg); 3367 3368 masm.bind(DONE_LABEL); 3369 masm.nop(); // avoid branch to branch 3370 } else { 3371 Label DONE_LABEL, IsInflated, Egress; 3372 3373 masm.movptr(tmpReg, Address(objReg, 0)) ; 3374 masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased 3375 masm.jcc (Assembler::notZero, IsInflated) ; 3376 3377 // it's stack-locked, biased or neutral 3378 // TODO: optimize markword triage order to reduce the number of 3379 // conditional branches in the most common cases. 3380 // Beware -- there's a subtle invariant that fetch of the markword 3381 // at [FETCH], below, will never observe a biased encoding (*101b). 3382 // If this invariant is not held we'll suffer exclusion (safety) failure. 3383 3384 if (UseBiasedLocking && !UseOptoBiasInlining) { 3385 masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters); 3386 masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH] 3387 } 3388 3389 // was q will it destroy high? 
3390 masm.orl (tmpReg, 1) ; 3391 masm.movptr(Address(boxReg, 0), tmpReg) ; 3392 if (os::is_MP()) { masm.lock(); } 3393 masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg 3394 if (_counters != NULL) { 3395 masm.cond_inc32(Assembler::equal, 3396 ExternalAddress((address) _counters->fast_path_entry_count_addr())); 3397 } 3398 masm.jcc (Assembler::equal, DONE_LABEL); 3399 3400 // Recursive locking 3401 masm.subptr(tmpReg, rsp); 3402 masm.andptr(tmpReg, 7 - os::vm_page_size()); 3403 masm.movptr(Address(boxReg, 0), tmpReg); 3404 if (_counters != NULL) { 3405 masm.cond_inc32(Assembler::equal, 3406 ExternalAddress((address) _counters->fast_path_entry_count_addr())); 3407 } 3408 masm.jmp (DONE_LABEL) ; 3409 3410 masm.bind (IsInflated) ; 3411 // It's inflated 3412 3413 // TODO: someday avoid the ST-before-CAS penalty by 3414 // relocating (deferring) the following ST. 3415 // We should also think about trying a CAS without having 3416 // fetched _owner. If the CAS is successful we may 3417 // avoid an RTO->RTS upgrade on the $line. 3418 // Without cast to int32_t a movptr will destroy r10 which is typically obj 3419 masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ; 3420 3421 masm.mov (boxReg, tmpReg) ; 3422 masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; 3423 masm.testptr(tmpReg, tmpReg) ; 3424 masm.jcc (Assembler::notZero, DONE_LABEL) ; 3425 3426 // It's inflated and appears unlocked 3427 if (os::is_MP()) { masm.lock(); } 3428 masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; 3429 // Intentional fall-through into DONE_LABEL ... 
3430 3431 masm.bind (DONE_LABEL) ; 3432 masm.nop () ; // avoid jmp to jmp 3433 } 3434 %} 3435 3436 // obj: object to unlock 3437 // box: box address (displaced header location), killed 3438 // RBX: killed tmp; cannot be obj nor box 3439 enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp) 3440 %{ 3441 3442 Register objReg = as_Register($obj$$reg); 3443 Register boxReg = as_Register($box$$reg); 3444 Register tmpReg = as_Register($tmp$$reg); 3445 MacroAssembler masm(&cbuf); 3446 3447 if (EmitSync & 4) { 3448 masm.cmpptr(rsp, 0) ; 3449 } else 3450 if (EmitSync & 8) { 3451 Label DONE_LABEL; 3452 if (UseBiasedLocking) { 3453 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL); 3454 } 3455 3456 // Check whether the displaced header is 0 3457 //(=> recursive unlock) 3458 masm.movptr(tmpReg, Address(boxReg, 0)); 3459 masm.testptr(tmpReg, tmpReg); 3460 masm.jcc(Assembler::zero, DONE_LABEL); 3461 3462 // If not recursive lock, reset the header to displaced header 3463 if (os::is_MP()) { 3464 masm.lock(); 3465 } 3466 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box 3467 masm.bind(DONE_LABEL); 3468 masm.nop(); // avoid branch to branch 3469 } else { 3470 Label DONE_LABEL, Stacked, CheckSucc ; 3471 3472 if (UseBiasedLocking && !UseOptoBiasInlining) { 3473 masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL); 3474 } 3475 3476 masm.movptr(tmpReg, Address(objReg, 0)) ; 3477 masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ; 3478 masm.jcc (Assembler::zero, DONE_LABEL) ; 3479 masm.testl (tmpReg, 0x02) ; 3480 masm.jcc (Assembler::zero, Stacked) ; 3481 3482 // It's inflated 3483 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; 3484 masm.xorptr(boxReg, r15_thread) ; 3485 masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ; 3486 masm.jcc (Assembler::notZero, DONE_LABEL) ; 3487 masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; 3488 masm.orptr (boxReg, Address 
(tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; 3489 masm.jcc (Assembler::notZero, CheckSucc) ; 3490 masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ; 3491 masm.jmp (DONE_LABEL) ; 3492 3493 if ((EmitSync & 65536) == 0) { 3494 Label LSuccess, LGoSlowPath ; 3495 masm.bind (CheckSucc) ; 3496 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ; 3497 masm.jcc (Assembler::zero, LGoSlowPath) ; 3498 3499 // I'd much rather use lock:andl m->_owner, 0 as it's faster than the 3500 // the explicit ST;MEMBAR combination, but masm doesn't currently support 3501 // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc 3502 // are all faster when the write buffer is populated. 3503 masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ; 3504 if (os::is_MP()) { 3505 masm.lock () ; masm.addl (Address(rsp, 0), 0) ; 3506 } 3507 masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ; 3508 masm.jcc (Assembler::notZero, LSuccess) ; 3509 3510 masm.movptr (boxReg, (int32_t)NULL_WORD) ; // box is really EAX 3511 if (os::is_MP()) { masm.lock(); } 3512 masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); 3513 masm.jcc (Assembler::notEqual, LSuccess) ; 3514 // Intentional fall-through into slow-path 3515 3516 masm.bind (LGoSlowPath) ; 3517 masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure 3518 masm.jmp (DONE_LABEL) ; 3519 3520 masm.bind (LSuccess) ; 3521 masm.testl (boxReg, 0) ; // set ICC.ZF=1 to indicate success 3522 masm.jmp (DONE_LABEL) ; 3523 } 3524 3525 masm.bind (Stacked) ; 3526 masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch 3527 if (os::is_MP()) { masm.lock(); } 3528 masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box 3529 3530 if (EmitSync & 65536) { 3531 masm.bind (CheckSucc) ; 3532 } 3533 masm.bind(DONE_LABEL); 3534 if (EmitSync & 32768) 
{ 3535 masm.nop(); // avoid branch to branch 3536 } 3537 } 3538 %} 3539 3540 3541 enc_class enc_rethrow() 3542 %{ 3543 cbuf.set_insts_mark(); 3544 emit_opcode(cbuf, 0xE9); // jmp entry 3545 emit_d32_reloc(cbuf, 3546 (int) (OptoRuntime::rethrow_stub() - cbuf.insts_end() - 4), 3547 runtime_call_Relocation::spec(), 3548 RELOC_DISP32); 3549 %} 3550 3551 enc_class absF_encoding(regF dst) 3552 %{ 3553 int dstenc = $dst$$reg; 3554 address signmask_address = (address) StubRoutines::x86::float_sign_mask(); 3555 3556 cbuf.set_insts_mark(); 3557 if (dstenc >= 8) { 3558 emit_opcode(cbuf, Assembler::REX_R); 3559 dstenc -= 8; 3560 } 3561 // XXX reg_mem doesn't support RIP-relative addressing yet 3562 emit_opcode(cbuf, 0x0F); 3563 emit_opcode(cbuf, 0x54); 3564 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101 3565 emit_d32_reloc(cbuf, signmask_address); 3566 %} 3567 3568 enc_class absD_encoding(regD dst) 3569 %{ 3570 int dstenc = $dst$$reg; 3571 address signmask_address = (address) StubRoutines::x86::double_sign_mask(); 3572 3573 cbuf.set_insts_mark(); 3574 emit_opcode(cbuf, 0x66); 3575 if (dstenc >= 8) { 3576 emit_opcode(cbuf, Assembler::REX_R); 3577 dstenc -= 8; 3578 } 3579 // XXX reg_mem doesn't support RIP-relative addressing yet 3580 emit_opcode(cbuf, 0x0F); 3581 emit_opcode(cbuf, 0x54); 3582 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101 3583 emit_d32_reloc(cbuf, signmask_address); 3584 %} 3585 3586 enc_class negF_encoding(regF dst) 3587 %{ 3588 int dstenc = $dst$$reg; 3589 address signflip_address = (address) StubRoutines::x86::float_sign_flip(); 3590 3591 cbuf.set_insts_mark(); 3592 if (dstenc >= 8) { 3593 emit_opcode(cbuf, Assembler::REX_R); 3594 dstenc -= 8; 3595 } 3596 // XXX reg_mem doesn't support RIP-relative addressing yet 3597 emit_opcode(cbuf, 0x0F); 3598 emit_opcode(cbuf, 0x57); 3599 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101 3600 emit_d32_reloc(cbuf, signflip_address); 3601 %} 3602 3603 enc_class negD_encoding(regD dst) 3604 %{ 3605 int dstenc = $dst$$reg; 
3606 address signflip_address = (address) StubRoutines::x86::double_sign_flip(); 3607 3608 cbuf.set_insts_mark(); 3609 emit_opcode(cbuf, 0x66); 3610 if (dstenc >= 8) { 3611 emit_opcode(cbuf, Assembler::REX_R); 3612 dstenc -= 8; 3613 } 3614 // XXX reg_mem doesn't support RIP-relative addressing yet 3615 emit_opcode(cbuf, 0x0F); 3616 emit_opcode(cbuf, 0x57); 3617 emit_rm(cbuf, 0x0, dstenc, 0x5); // 00 reg 101 3618 emit_d32_reloc(cbuf, signflip_address); 3619 %} 3620 3621 enc_class f2i_fixup(rRegI dst, regF src) 3622 %{ 3623 int dstenc = $dst$$reg; 3624 int srcenc = $src$$reg; 3625 3626 // cmpl $dst, #0x80000000 3627 if (dstenc >= 8) { 3628 emit_opcode(cbuf, Assembler::REX_B); 3629 } 3630 emit_opcode(cbuf, 0x81); 3631 emit_rm(cbuf, 0x3, 0x7, dstenc & 7); 3632 emit_d32(cbuf, 0x80000000); 3633 3634 // jne,s done 3635 emit_opcode(cbuf, 0x75); 3636 if (srcenc < 8 && dstenc < 8) { 3637 emit_d8(cbuf, 0xF); 3638 } else if (srcenc >= 8 && dstenc >= 8) { 3639 emit_d8(cbuf, 0x11); 3640 } else { 3641 emit_d8(cbuf, 0x10); 3642 } 3643 3644 // subq rsp, #8 3645 emit_opcode(cbuf, Assembler::REX_W); 3646 emit_opcode(cbuf, 0x83); 3647 emit_rm(cbuf, 0x3, 0x5, RSP_enc); 3648 emit_d8(cbuf, 8); 3649 3650 // movss [rsp], $src 3651 emit_opcode(cbuf, 0xF3); 3652 if (srcenc >= 8) { 3653 emit_opcode(cbuf, Assembler::REX_R); 3654 } 3655 emit_opcode(cbuf, 0x0F); 3656 emit_opcode(cbuf, 0x11); 3657 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes 3658 3659 // call f2i_fixup 3660 cbuf.set_insts_mark(); 3661 emit_opcode(cbuf, 0xE8); 3662 emit_d32_reloc(cbuf, 3663 (int) 3664 (StubRoutines::x86::f2i_fixup() - cbuf.insts_end() - 4), 3665 runtime_call_Relocation::spec(), 3666 RELOC_DISP32); 3667 3668 // popq $dst 3669 if (dstenc >= 8) { 3670 emit_opcode(cbuf, Assembler::REX_B); 3671 } 3672 emit_opcode(cbuf, 0x58 | (dstenc & 7)); 3673 3674 // done: 3675 %} 3676 3677 enc_class f2l_fixup(rRegL dst, regF src) 3678 %{ 3679 int dstenc = $dst$$reg; 3680 int srcenc = $src$$reg; 3681 address 
const_address = (address) StubRoutines::x86::double_sign_flip(); 3682 3683 // cmpq $dst, [0x8000000000000000] 3684 cbuf.set_insts_mark(); 3685 emit_opcode(cbuf, dstenc < 8 ? Assembler::REX_W : Assembler::REX_WR); 3686 emit_opcode(cbuf, 0x39); 3687 // XXX reg_mem doesn't support RIP-relative addressing yet 3688 emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101 3689 emit_d32_reloc(cbuf, const_address); 3690 3691 3692 // jne,s done 3693 emit_opcode(cbuf, 0x75); 3694 if (srcenc < 8 && dstenc < 8) { 3695 emit_d8(cbuf, 0xF); 3696 } else if (srcenc >= 8 && dstenc >= 8) { 3697 emit_d8(cbuf, 0x11); 3698 } else { 3699 emit_d8(cbuf, 0x10); 3700 } 3701 3702 // subq rsp, #8 3703 emit_opcode(cbuf, Assembler::REX_W); 3704 emit_opcode(cbuf, 0x83); 3705 emit_rm(cbuf, 0x3, 0x5, RSP_enc); 3706 emit_d8(cbuf, 8); 3707 3708 // movss [rsp], $src 3709 emit_opcode(cbuf, 0xF3); 3710 if (srcenc >= 8) { 3711 emit_opcode(cbuf, Assembler::REX_R); 3712 } 3713 emit_opcode(cbuf, 0x0F); 3714 emit_opcode(cbuf, 0x11); 3715 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes 3716 3717 // call f2l_fixup 3718 cbuf.set_insts_mark(); 3719 emit_opcode(cbuf, 0xE8); 3720 emit_d32_reloc(cbuf, 3721 (int) 3722 (StubRoutines::x86::f2l_fixup() - cbuf.insts_end() - 4), 3723 runtime_call_Relocation::spec(), 3724 RELOC_DISP32); 3725 3726 // popq $dst 3727 if (dstenc >= 8) { 3728 emit_opcode(cbuf, Assembler::REX_B); 3729 } 3730 emit_opcode(cbuf, 0x58 | (dstenc & 7)); 3731 3732 // done: 3733 %} 3734 3735 enc_class d2i_fixup(rRegI dst, regD src) 3736 %{ 3737 int dstenc = $dst$$reg; 3738 int srcenc = $src$$reg; 3739 3740 // cmpl $dst, #0x80000000 3741 if (dstenc >= 8) { 3742 emit_opcode(cbuf, Assembler::REX_B); 3743 } 3744 emit_opcode(cbuf, 0x81); 3745 emit_rm(cbuf, 0x3, 0x7, dstenc & 7); 3746 emit_d32(cbuf, 0x80000000); 3747 3748 // jne,s done 3749 emit_opcode(cbuf, 0x75); 3750 if (srcenc < 8 && dstenc < 8) { 3751 emit_d8(cbuf, 0xF); 3752 } else if (srcenc >= 8 && dstenc >= 8) { 3753 emit_d8(cbuf, 
0x11); 3754 } else { 3755 emit_d8(cbuf, 0x10); 3756 } 3757 3758 // subq rsp, #8 3759 emit_opcode(cbuf, Assembler::REX_W); 3760 emit_opcode(cbuf, 0x83); 3761 emit_rm(cbuf, 0x3, 0x5, RSP_enc); 3762 emit_d8(cbuf, 8); 3763 3764 // movsd [rsp], $src 3765 emit_opcode(cbuf, 0xF2); 3766 if (srcenc >= 8) { 3767 emit_opcode(cbuf, Assembler::REX_R); 3768 } 3769 emit_opcode(cbuf, 0x0F); 3770 emit_opcode(cbuf, 0x11); 3771 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes 3772 3773 // call d2i_fixup 3774 cbuf.set_insts_mark(); 3775 emit_opcode(cbuf, 0xE8); 3776 emit_d32_reloc(cbuf, 3777 (int) 3778 (StubRoutines::x86::d2i_fixup() - cbuf.insts_end() - 4), 3779 runtime_call_Relocation::spec(), 3780 RELOC_DISP32); 3781 3782 // popq $dst 3783 if (dstenc >= 8) { 3784 emit_opcode(cbuf, Assembler::REX_B); 3785 } 3786 emit_opcode(cbuf, 0x58 | (dstenc & 7)); 3787 3788 // done: 3789 %} 3790 3791 enc_class d2l_fixup(rRegL dst, regD src) 3792 %{ 3793 int dstenc = $dst$$reg; 3794 int srcenc = $src$$reg; 3795 address const_address = (address) StubRoutines::x86::double_sign_flip(); 3796 3797 // cmpq $dst, [0x8000000000000000] 3798 cbuf.set_insts_mark(); 3799 emit_opcode(cbuf, dstenc < 8 ? 
Assembler::REX_W : Assembler::REX_WR); 3800 emit_opcode(cbuf, 0x39); 3801 // XXX reg_mem doesn't support RIP-relative addressing yet 3802 emit_rm(cbuf, 0x0, dstenc & 7, 0x5); // 00 reg 101 3803 emit_d32_reloc(cbuf, const_address); 3804 3805 3806 // jne,s done 3807 emit_opcode(cbuf, 0x75); 3808 if (srcenc < 8 && dstenc < 8) { 3809 emit_d8(cbuf, 0xF); 3810 } else if (srcenc >= 8 && dstenc >= 8) { 3811 emit_d8(cbuf, 0x11); 3812 } else { 3813 emit_d8(cbuf, 0x10); 3814 } 3815 3816 // subq rsp, #8 3817 emit_opcode(cbuf, Assembler::REX_W); 3818 emit_opcode(cbuf, 0x83); 3819 emit_rm(cbuf, 0x3, 0x5, RSP_enc); 3820 emit_d8(cbuf, 8); 3821 3822 // movsd [rsp], $src 3823 emit_opcode(cbuf, 0xF2); 3824 if (srcenc >= 8) { 3825 emit_opcode(cbuf, Assembler::REX_R); 3826 } 3827 emit_opcode(cbuf, 0x0F); 3828 emit_opcode(cbuf, 0x11); 3829 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes 3830 3831 // call d2l_fixup 3832 cbuf.set_insts_mark(); 3833 emit_opcode(cbuf, 0xE8); 3834 emit_d32_reloc(cbuf, 3835 (int) 3836 (StubRoutines::x86::d2l_fixup() - cbuf.insts_end() - 4), 3837 runtime_call_Relocation::spec(), 3838 RELOC_DISP32); 3839 3840 // popq $dst 3841 if (dstenc >= 8) { 3842 emit_opcode(cbuf, Assembler::REX_B); 3843 } 3844 emit_opcode(cbuf, 0x58 | (dstenc & 7)); 3845 3846 // done: 3847 %} 3848 %} 3849 3850 3851 3852 //----------FRAME-------------------------------------------------------------- 3853 // Definition of frame structure and management information. 
3854 // 3855 // S T A C K L A Y O U T Allocators stack-slot number 3856 // | (to get allocators register number 3857 // G Owned by | | v add OptoReg::stack0()) 3858 // r CALLER | | 3859 // o | +--------+ pad to even-align allocators stack-slot 3860 // w V | pad0 | numbers; owned by CALLER 3861 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned 3862 // h ^ | in | 5 3863 // | | args | 4 Holes in incoming args owned by SELF 3864 // | | | | 3 3865 // | | +--------+ 3866 // V | | old out| Empty on Intel, window on Sparc 3867 // | old |preserve| Must be even aligned. 3868 // | SP-+--------+----> Matcher::_old_SP, even aligned 3869 // | | in | 3 area for Intel ret address 3870 // Owned by |preserve| Empty on Sparc. 3871 // SELF +--------+ 3872 // | | pad2 | 2 pad to align old SP 3873 // | +--------+ 1 3874 // | | locks | 0 3875 // | +--------+----> OptoReg::stack0(), even aligned 3876 // | | pad1 | 11 pad to align new SP 3877 // | +--------+ 3878 // | | | 10 3879 // | | spills | 9 spills 3880 // V | | 8 (pad0 slot for callee) 3881 // -----------+--------+----> Matcher::_out_arg_limit, unaligned 3882 // ^ | out | 7 3883 // | | args | 6 Holes in outgoing args owned by CALLEE 3884 // Owned by +--------+ 3885 // CALLEE | new out| 6 Empty on Intel, window on Sparc 3886 // | new |preserve| Must be even-aligned. 3887 // | SP-+--------+----> Matcher::_new_SP, even aligned 3888 // | | | 3889 // 3890 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is 3891 // known from SELF's arguments and the Java calling convention. 3892 // Region 6-7 is determined per call site. 3893 // Note 2: If the calling convention leaves holes in the incoming argument 3894 // area, those holes are owned by SELF. Holes in the outgoing area 3895 // are owned by the CALLEE. Holes should not be nessecary in the 3896 // incoming area, as the Java calling convention is completely under 3897 // the control of the AD file. 
Doubles can be sorted and packed to 3898 // avoid holes. Holes in the outgoing arguments may be nessecary for 3899 // varargs C calling conventions. 3900 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is 3901 // even aligned with pad0 as needed. 3902 // Region 6 is even aligned. Region 6-7 is NOT even aligned; 3903 // region 6-11 is even aligned; it may be padded out more so that 3904 // the region from SP to FP meets the minimum stack alignment. 3905 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack 3906 // alignment. Region 11, pad1, may be dynamically extended so that 3907 // SP meets the minimum alignment. 3908 3909 frame 3910 %{ 3911 // What direction does stack grow in (assumed to be same for C & Java) 3912 stack_direction(TOWARDS_LOW); 3913 3914 // These three registers define part of the calling convention 3915 // between compiled code and the interpreter. 3916 inline_cache_reg(RAX); // Inline Cache Register 3917 interpreter_method_oop_reg(RBX); // Method Oop Register when 3918 // calling interpreter 3919 3920 // Optional: name the operand used by cisc-spilling to access 3921 // [stack_pointer + offset] 3922 cisc_spilling_operand_name(indOffset32); 3923 3924 // Number of stack slots consumed by locking an object 3925 sync_stack_slots(2); 3926 3927 // Compiled code's Frame Pointer 3928 frame_pointer(RSP); 3929 3930 // Interpreter stores its frame pointer in a register which is 3931 // stored to the stack by I2CAdaptors. 3932 // I2CAdaptors convert from interpreted java to compiled java. 3933 interpreter_frame_pointer(RBP); 3934 3935 // Stack alignment requirement 3936 stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes) 3937 3938 // Number of stack slots between incoming argument block and the start of 3939 // a new frame. The PROLOG must add this many slots to the stack. The 3940 // EPILOG must remove this many slots. amd64 needs two slots for 3941 // return address. 
3942 in_preserve_stack_slots(4 + 2 * VerifyStackAtCalls); 3943 3944 // Number of outgoing stack slots killed above the out_preserve_stack_slots 3945 // for calls to C. Supports the var-args backing area for register parms. 3946 varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt); 3947 3948 // The after-PROLOG location of the return address. Location of 3949 // return address specifies a type (REG or STACK) and a number 3950 // representing the register number (i.e. - use a register name) or 3951 // stack slot. 3952 // Ret Addr is on stack in slot 0 if no locks or verification or alignment. 3953 // Otherwise, it is above the locks and verification slot and alignment word 3954 return_addr(STACK - 2 + 3955 round_to(2 + 2 * VerifyStackAtCalls + 3956 Compile::current()->fixed_slots(), 3957 WordsPerLong * 2)); 3958 3959 // Body of function which returns an integer array locating 3960 // arguments either in registers or in stack slots. Passed an array 3961 // of ideal registers called "sig" and a "length" count. Stack-slot 3962 // offsets are based on outgoing arguments, i.e. a CALLER setting up 3963 // arguments for a CALLEE. Incoming stack arguments are 3964 // automatically biased by the preserve_stack_slots field above. 3965 3966 calling_convention 3967 %{ 3968 // No difference between ingoing/outgoing just pass false 3969 SharedRuntime::java_calling_convention(sig_bt, regs, length, false); 3970 %} 3971 3972 c_calling_convention 3973 %{ 3974 // This is obviously always outgoing 3975 (void) SharedRuntime::c_calling_convention(sig_bt, regs, length); 3976 %} 3977 3978 // Location of compiled Java return values. Same as C for now. 
3979 return_value 3980 %{ 3981 assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, 3982 "only return normal values"); 3983 3984 static const int lo[Op_RegL + 1] = { 3985 0, 3986 0, 3987 RAX_num, // Op_RegN 3988 RAX_num, // Op_RegI 3989 RAX_num, // Op_RegP 3990 XMM0_num, // Op_RegF 3991 XMM0_num, // Op_RegD 3992 RAX_num // Op_RegL 3993 }; 3994 static const int hi[Op_RegL + 1] = { 3995 0, 3996 0, 3997 OptoReg::Bad, // Op_RegN 3998 OptoReg::Bad, // Op_RegI 3999 RAX_H_num, // Op_RegP 4000 OptoReg::Bad, // Op_RegF 4001 XMM0_H_num, // Op_RegD 4002 RAX_H_num // Op_RegL 4003 }; 4004 assert(ARRAY_SIZE(hi) == _last_machine_leaf - 1, "missing type"); 4005 return OptoRegPair(hi[ideal_reg], lo[ideal_reg]); 4006 %} 4007 %} 4008 4009 //----------ATTRIBUTES--------------------------------------------------------- 4010 //----------Operand Attributes------------------------------------------------- 4011 op_attrib op_cost(0); // Required cost attribute 4012 4013 //----------Instruction Attributes--------------------------------------------- 4014 ins_attrib ins_cost(100); // Required cost attribute 4015 ins_attrib ins_size(8); // Required size attribute (in bits) 4016 ins_attrib ins_short_branch(0); // Required flag: is this instruction 4017 // a non-matching short branch variant 4018 // of some long branch? 4019 ins_attrib ins_alignment(1); // Required alignment attribute (must 4020 // be a power of 2) specifies the 4021 // alignment that some part of the 4022 // instruction (not necessarily the 4023 // start) requires. If > 1, a 4024 // compute_padding() function must be 4025 // provided for the instruction 4026 4027 //----------OPERANDS----------------------------------------------------------- 4028 // Operand definitions must precede instruction definitions for correct parsing 4029 // in the ADLC because operands constitute user defined types which are used in 4030 // instruction definitions. 

//----------Simple Operands----------------------------------------------------
// Immediate Operands
// Integer Immediate
operand immI()
%{
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes (SIB scale field: 0..3)
operand immI2()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer immediate that fits in a sign-extended 8-bit field
operand immI8()
%{
  predicate((-0x80 <= n->get_int()) && (n->get_int() < 0x80));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer immediate that fits in a signed 16-bit field
operand immI16()
%{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32()
%{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_64()
%{
  predicate( n->get_int() == 64 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate
operand immP()
%{
  match(ConP);

  op_cost(10);
  format %{ %}
interface(CONST_INTER); 4137 %} 4138 4139 // NULL Pointer Immediate 4140 operand immP0() 4141 %{ 4142 predicate(n->get_ptr() == 0); 4143 match(ConP); 4144 4145 op_cost(5); 4146 format %{ %} 4147 interface(CONST_INTER); 4148 %} 4149 4150 operand immP_poll() %{ 4151 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page()); 4152 match(ConP); 4153 4154 // formats are generated automatically for constants and base registers 4155 format %{ %} 4156 interface(CONST_INTER); 4157 %} 4158 4159 // Pointer Immediate 4160 operand immN() %{ 4161 match(ConN); 4162 4163 op_cost(10); 4164 format %{ %} 4165 interface(CONST_INTER); 4166 %} 4167 4168 // NULL Pointer Immediate 4169 operand immN0() %{ 4170 predicate(n->get_narrowcon() == 0); 4171 match(ConN); 4172 4173 op_cost(5); 4174 format %{ %} 4175 interface(CONST_INTER); 4176 %} 4177 4178 operand immP31() 4179 %{ 4180 predicate(!n->as_Type()->type()->isa_oopptr() 4181 && (n->get_ptr() >> 31) == 0); 4182 match(ConP); 4183 4184 op_cost(5); 4185 format %{ %} 4186 interface(CONST_INTER); 4187 %} 4188 4189 4190 // Long Immediate 4191 operand immL() 4192 %{ 4193 match(ConL); 4194 4195 op_cost(20); 4196 format %{ %} 4197 interface(CONST_INTER); 4198 %} 4199 4200 // Long Immediate 8-bit 4201 operand immL8() 4202 %{ 4203 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L); 4204 match(ConL); 4205 4206 op_cost(5); 4207 format %{ %} 4208 interface(CONST_INTER); 4209 %} 4210 4211 // Long Immediate 32-bit unsigned 4212 operand immUL32() 4213 %{ 4214 predicate(n->get_long() == (unsigned int) (n->get_long())); 4215 match(ConL); 4216 4217 op_cost(10); 4218 format %{ %} 4219 interface(CONST_INTER); 4220 %} 4221 4222 // Long Immediate 32-bit signed 4223 operand immL32() 4224 %{ 4225 predicate(n->get_long() == (int) (n->get_long())); 4226 match(ConL); 4227 4228 op_cost(15); 4229 format %{ %} 4230 interface(CONST_INTER); 4231 %} 4232 4233 // Long Immediate zero 4234 operand immL0() 4235 %{ 4236 predicate(n->get_long() == 
0L); 4237 match(ConL); 4238 4239 op_cost(10); 4240 format %{ %} 4241 interface(CONST_INTER); 4242 %} 4243 4244 // Constant for increment 4245 operand immL1() 4246 %{ 4247 predicate(n->get_long() == 1); 4248 match(ConL); 4249 4250 format %{ %} 4251 interface(CONST_INTER); 4252 %} 4253 4254 // Constant for decrement 4255 operand immL_M1() 4256 %{ 4257 predicate(n->get_long() == -1); 4258 match(ConL); 4259 4260 format %{ %} 4261 interface(CONST_INTER); 4262 %} 4263 4264 // Long Immediate: the value 10 4265 operand immL10() 4266 %{ 4267 predicate(n->get_long() == 10); 4268 match(ConL); 4269 4270 format %{ %} 4271 interface(CONST_INTER); 4272 %} 4273 4274 // Long immediate from 0 to 127. 4275 // Used for a shorter form of long mul by 10. 4276 operand immL_127() 4277 %{ 4278 predicate(0 <= n->get_long() && n->get_long() < 0x80); 4279 match(ConL); 4280 4281 op_cost(10); 4282 format %{ %} 4283 interface(CONST_INTER); 4284 %} 4285 4286 // Long Immediate: low 32-bit mask 4287 operand immL_32bits() 4288 %{ 4289 predicate(n->get_long() == 0xFFFFFFFFL); 4290 match(ConL); 4291 op_cost(20); 4292 4293 format %{ %} 4294 interface(CONST_INTER); 4295 %} 4296 4297 // Float Immediate zero 4298 operand immF0() 4299 %{ 4300 predicate(jint_cast(n->getf()) == 0); 4301 match(ConF); 4302 4303 op_cost(5); 4304 format %{ %} 4305 interface(CONST_INTER); 4306 %} 4307 4308 // Float Immediate 4309 operand immF() 4310 %{ 4311 match(ConF); 4312 4313 op_cost(15); 4314 format %{ %} 4315 interface(CONST_INTER); 4316 %} 4317 4318 // Double Immediate zero 4319 operand immD0() 4320 %{ 4321 predicate(jlong_cast(n->getd()) == 0); 4322 match(ConD); 4323 4324 op_cost(5); 4325 format %{ %} 4326 interface(CONST_INTER); 4327 %} 4328 4329 // Double Immediate 4330 operand immD() 4331 %{ 4332 match(ConD); 4333 4334 op_cost(15); 4335 format %{ %} 4336 interface(CONST_INTER); 4337 %} 4338 4339 // Immediates for special shifts (sign extend) 4340 4341 // Constants for increment 4342 operand immI_16() 4343 %{ 4344 
  predicate(n->get_int() == 16);
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for short-wide masking
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immL_255()
%{
  predicate(n->get_long() == 255);
  match(ConL);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for short-wide masking
operand immL_65535()
%{
  predicate(n->get_long() == 65535);
  match(ConL);

  format %{ %}
  interface(CONST_INTER);
%}

// Register Operands
// Integer Register.  The match(xxx_RegI) entries let the DFA accept the
// more specific single-register operands wherever a generic rRegI is used.
operand rRegI()
%{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  match(rax_RegI);
  match(rbx_RegI);
  match(rcx_RegI);
  match(rdx_RegI);
  match(rdi_RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers
operand rax_RegI()
%{
  constraint(ALLOC_IN_RC(int_rax_reg));
  match(RegI);
  match(rRegI);

  format %{ "RAX" %}
  interface(REG_INTER);
%}

// Special Registers
operand rbx_RegI()
%{
  constraint(ALLOC_IN_RC(int_rbx_reg));
  match(RegI);
  match(rRegI);

  format %{ "RBX" %}
  interface(REG_INTER);
%}

operand rcx_RegI()
%{
  constraint(ALLOC_IN_RC(int_rcx_reg));
  match(RegI);
  match(rRegI);

  format %{ "RCX" %}
  interface(REG_INTER);
%}

operand rdx_RegI()
%{
  constraint(ALLOC_IN_RC(int_rdx_reg));
  match(RegI);
  match(rRegI);

  format %{ "RDX" %}
  interface(REG_INTER);
%}

operand rdi_RegI()
%{
  constraint(ALLOC_IN_RC(int_rdi_reg));
  match(RegI);
  match(rRegI);

  format %{ "RDI" %}
  interface(REG_INTER);
%}

// Integer register excluding RCX
// NOTE(review): presumably keeps RCX free for instructions that use it
// implicitly (shift counts) — confirm against the users of this operand.
operand no_rcx_RegI()
%{
  constraint(ALLOC_IN_RC(int_no_rcx_reg));
  match(RegI);
  match(rax_RegI);
  match(rbx_RegI);
  match(rdx_RegI);
  match(rdi_RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding RAX and RDX
operand no_rax_rdx_RegI()
%{
  constraint(ALLOC_IN_RC(int_no_rax_rdx_reg));
  match(RegI);
  match(rbx_RegI);
  match(rcx_RegI);
  match(rdi_RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register (includes R15; see the Q&A note below)
operand any_RegP()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegP);
  match(rax_RegP);
  match(rbx_RegP);
  match(rdi_RegP);
  match(rsi_RegP);
  match(rbp_RegP);
  match(r15_RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand rRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(rax_RegP);
  match(rbx_RegP);
  match(rdi_RegP);
  match(rsi_RegP);
  match(rbp_RegP);
  match(r15_RegP);  // See Q&A below about r15_RegP.

  format %{ %}
  interface(REG_INTER);
%}

// Narrow (compressed) oop register
operand rRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// Question: Why is r15_RegP (the read-only TLS register) a match for rRegP?
// Answer: Operand match rules govern the DFA as it processes instruction inputs.
// It's fine for an instruction input which expects rRegP to match a r15_RegP.
// The output of an instruction is controlled by the allocator, which respects
// register class masks, not match rules.  Unless an instruction mentions
// r15_RegP or any_RegP explicitly as its output, r15 will not be considered
// by the allocator as an input.

operand no_rax_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_no_rax_reg));
  match(RegP);
  match(rbx_RegP);
  match(rsi_RegP);
  match(rdi_RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand no_rbp_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_no_rbp_reg));
  match(RegP);
  match(rbx_RegP);
  match(rsi_RegP);
  match(rdi_RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand no_rax_rbx_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_no_rax_rbx_reg));
  match(RegP);
  match(rsi_RegP);
  match(rdi_RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers
// Return a pointer value
operand rax_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rax_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers
// Return a compressed pointer value
operand rax_RegN()
%{
  constraint(ALLOC_IN_RC(int_rax_reg));
  match(RegN);
  match(rRegN);

  format %{ %}
  interface(REG_INTER);
%}

// Used in AtomicAdd
operand rbx_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rbx_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand rsi_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rsi_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Used in rep stosq
operand rdi_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rdi_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand rbp_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_rbp_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand r15_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_r15_reg));
  match(RegP);
  match(rRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand rRegL()
%{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);
  match(rax_RegL);
  match(rdx_RegL);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers
operand no_rax_rdx_RegL()
%{
  constraint(ALLOC_IN_RC(long_no_rax_rdx_reg));
  match(RegL);
  match(rRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Long register excluding RAX.  Reuses the no-RAX-RDX class and adds
// rdx_RegL back as a match, so only RAX is effectively excluded.
operand no_rax_RegL()
%{
  constraint(ALLOC_IN_RC(long_no_rax_rdx_reg));
  match(RegL);
  match(rRegL);
  match(rdx_RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand no_rcx_RegL()
%{
  constraint(ALLOC_IN_RC(long_no_rcx_reg));
  match(RegL);
  match(rRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand rax_RegL()
%{
  constraint(ALLOC_IN_RC(long_rax_reg));
  match(RegL);
  match(rRegL);

  format %{ "RAX" %}
  interface(REG_INTER);
%}

operand rcx_RegL()
%{
  constraint(ALLOC_IN_RC(long_rcx_reg));
  match(RegL);
  match(rRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand rdx_RegL()
%{
  constraint(ALLOC_IN_RC(long_rdx_reg));
  match(RegL);
  match(rRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of FLOATING POINT compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "RFLAGS_U" %}
  interface(REG_INTER);
%}

// Flags for FP compares whose unordered case needs no fixup; only reachable
// via explicit instruction operands (predicate(false) keeps the DFA from
// selecting it on its own).
operand rFlagsRegUCF() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  predicate(false);

  format %{ "RFLAGS_U_CF" %}
  interface(REG_INTER);
%}

// Float register operands
operand regF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double register operands
operand regD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}


//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER encodings below, index(0x4) means "no index register"
// and base(0x4) means RSP (see the stackSlot* operands).

// Direct Memory Operand
// operand direct(immP addr)
// %{
//   match(addr);

//   format %{ "[$addr]" %}
//   interface(MEMORY_INTER) %{
//     base(0xFFFFFFFF);
//     index(0x4);
//     scale(0x0);
//     disp($addr);
//   %}
// %}

// Indirect Memory Operand
operand indirect(any_RegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x4);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand
operand indOffset8(any_RegP reg, immL8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);

  format %{ "[$reg + $off (8-bit)]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x4);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Long Offset Operand
operand indOffset32(any_RegP reg, immL32 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);

  format %{ "[$reg + $off (32-bit)]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x4);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand
operand indIndexOffset(any_RegP reg, rRegL lreg, immL32 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand
operand indIndex(any_RegP reg, rRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);

  op_cost(10);
  format %{"[$reg + $lreg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus Index Register
operand indIndexScale(any_RegP reg, rRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (LShiftL lreg scale));

  op_cost(10);
  format %{"[$reg + $lreg << $scale]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
operand indIndexScaleOffset(any_RegP reg, immL32 off, rRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
// The predicate digs into the matched AddP tree to require a provably
// non-negative int index (so the implicit sign extension is safe).
operand indPosIndexScaleOffset(any_RegP reg, immL32 off, rRegI idx, immI2 scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  match(AddP (AddP reg (LShiftL (ConvI2L idx) scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $idx << $scale]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($idx);
    scale($scale);
    disp($off);
  %}
%}

// Indirect Narrow Oop Plus Offset Operand
// Note: x86 architecture doesn't support "scale * index + offset" without a base
// we can't free r12 even with Universe::narrow_oop_base() == NULL.
operand indCompressedOopOffset(rRegN reg, immL32 off) %{
  predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);

  op_cost(10);
  format %{"[R12 + $reg << 3 + $off] (compressed oop addressing)" %}
  interface(MEMORY_INTER) %{
    base(0xc); // R12
    index($reg);
    scale(0x3);
    disp($off);
  %}
%}

// Indirect Memory Operand
operand indirectNarrow(rRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x4);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand
operand indOffset8Narrow(rRegN reg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);

  format %{ "[$reg + $off (8-bit)]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x4);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Long Offset Operand
operand indOffset32Narrow(rRegN reg, immL32 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);

  format %{ "[$reg + $off (32-bit)]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x4);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand
operand indIndexOffsetNarrow(rRegN reg, rRegL lreg, immL32 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand
operand indIndexNarrow(rRegN reg, rRegL lreg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);

  op_cost(10);
  format %{"[$reg + $lreg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus Index Register
operand indIndexScaleNarrow(rRegN reg, rRegL lreg, immI2 scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));

  op_cost(10);
  format %{"[$reg + $lreg << $scale]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
operand indIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegL lreg, immI2 scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Positive Index Register Plus Offset Operand
operand indPosIndexScaleOffsetNarrow(rRegN reg, immL32 off, rRegI idx, immI2 scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  match(AddP (AddP (DecodeN reg) (LShiftL (ConvI2L idx) scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $idx << $scale]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($idx);
    scale($scale);
    disp($off);
  %}
%}


//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x4);   // RSP
    index(0x4);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x4);   // RSP
    index(0x4);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x4);   // RSP
    index(0x4);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x4);   // RSP
    index(0x4);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x4);   // RSP
    index(0x4);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

//----------Conditional Branch Operands----------------------------------------
// Comparison Op  - This is the operation of the comparison, and is limited to
//                  the following set of codes:
//                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Code (signed condition-code encodings)
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0xC, "l");
    greater_equal(0xD, "ge");
    less_equal(0xE, "le");
    greater(0xF, "g");
  %}
%}

// Comparison Code, unsigned compare.  Used by FP also, with
// C2 (unordered) turned into GT or LT already.  The other bits
// C0 and C3 are turned into Carry & Zero flags.
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0x2, "b");
    greater_equal(0x3, "nb");
    less_equal(0x6, "be");
    greater(0x7, "nbe");
  %}
%}


// Floating comparisons that don't require any fixup for the unordered case
// (restricted to lt/ge/le/gt tests by the predicate).
operand cmpOpUCF() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test == BoolTest::lt ||
            n->as_Bool()->_test._test == BoolTest::ge ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);
  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0x2, "b");
    greater_equal(0x3, "nb");
    less_equal(0x6, "be");
    greater(0x7, "nbe");
  %}
%}


// Floating comparisons that can be fixed up with extra conditional jumps
// (only eq/ne tests, where unordered needs explicit handling).
operand cmpOpUCF2() %{
  match(Bool);
  predicate(n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::eq);
  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4, "e");
    not_equal(0x5, "ne");
    less(0x2, "b");
    greater_equal(0x3, "nb");
    less_equal(0x6, "be");
    greater(0x7, "nbe");
  %}
%}


//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
// instruction definitions by not requiring the AD writer to specify separate
// instructions for every form of operand when the instruction accepts
// multiple operand types with the same basic encoding and format.  The classic
// case of this is memory operands.

// Every addressing-mode operand defined above, usable wherever an
// instruction takes a generic "memory" operand.
opclass memory(indirect, indOffset8, indOffset32, indIndexOffset, indIndex,
               indIndexScale, indIndexScaleOffset, indPosIndexScaleOffset,
               indCompressedOopOffset,
               indirectNarrow, indOffset8Narrow, indOffset32Narrow,
               indIndexOffsetNarrow, indIndexNarrow, indIndexScaleNarrow,
               indIndexScaleOffsetNarrow, indPosIndexScaleOffsetNarrow);

//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  variable_size_instructions;        // Instructions are of variable size
  max_instructions_per_bundle = 3;   // Up to 3 instructions per bundle
  instruction_unit_size = 1;         // An instruction is 1 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

// Generic P2/P3 pipeline
// 3 decoders, only D0 handles big operands; a "bundle" is the limit of
// 3 instructions decoded per cycle.
// 2 load/store ops per cycle, 1 branch, 1 FPU,
// 3 ALU op, only ALU0 handles mul instructions.
resources( D0, D1, D2, DECODE = D0 | D1 | D2,
           MS0, MS1, MS2, MEM = MS0 | MS1 | MS2,
           BR, FPU,
           ALU0, ALU1, ALU2, ALU = ALU0 | ALU1 | ALU2);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Generic P2/P3 pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Naming convention: ialu or fpu
// Then: _reg
// Then: _reg if there is a 2nd register
// Then: _long if it's a pair of instructions implementing a long
// Then: _fat if it requires the big decoder
// Or:   _mem if it requires the big decoder and a memory unit.

// Integer ALU reg operation
pipe_class ialu_reg(rRegI dst)
%{
  single_instruction;
  dst    : S4(write);
  dst    : S3(read);
  DECODE : S0;        // any decoder
  ALU    : S3;        // any alu
%}

// Long ALU reg operation
pipe_class ialu_reg_long(rRegL dst)
%{
  instruction_count(2);
  dst    : S4(write);
  dst    : S3(read);
  DECODE : S0(2);     // any 2 decoders
  ALU    : S3(2);     // both alus
%}

// Integer ALU reg operation using big decoder
pipe_class ialu_reg_fat(rRegI dst)
%{
  single_instruction;
  dst    : S4(write);
  dst    : S3(read);
  D0     : S0;        // big decoder only
  ALU    : S3;        // any alu
%}

// Long ALU reg operation using big decoder
pipe_class ialu_reg_long_fat(rRegL dst)
%{
  instruction_count(2);
  dst    : S4(write);
  dst    : S3(read);
  D0     : S0(2);     // big decoder only; twice
  ALU    : S3(2);     // any 2 alus
%}

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(rRegI dst, rRegI src)
%{
  single_instruction;
  dst    : S4(write);
  src    : S3(read);
  DECODE : S0;        // any decoder
  ALU    : S3;        // any alu
%}

// Long ALU reg-reg operation
pipe_class ialu_reg_reg_long(rRegL dst, rRegL src)
%{
  instruction_count(2);
  dst    : S4(write);
  src    : S3(read);
  DECODE : S0(2);     // any 2 decoders
  ALU    : S3(2);     // both alus
%}

// Integer ALU reg-reg operation (memory-typed src; needs the big decoder)
pipe_class ialu_reg_reg_fat(rRegI dst, memory src)
%{
  single_instruction;
  dst    : S4(write);
  src    : S3(read);
  D0     : S0;        // big decoder only
  ALU    : S3;        // any alu
%}

// Long ALU reg-reg operation
pipe_class ialu_reg_reg_long_fat(rRegL dst, rRegL src)
%{
  instruction_count(2);
  dst    : S4(write);
  src    : S3(read);
  D0     : S0(2);     // big decoder only; twice
  ALU    : S3(2);     // both alus
%}

// Integer ALU reg-mem operation
pipe_class ialu_reg_mem(rRegI dst, memory mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : S3(read);
  D0     : S0;        // big decoder only
  ALU    : S4;        // any alu
  MEM    : S3;        // any mem
%}

// Integer mem operation (prefetch)
pipe_class ialu_mem(memory mem)
%{
  single_instruction;
  mem    : S3(read);
  D0     : S0;        // big decoder only
  MEM    : S3;        // any mem
%}

// Integer Store to Memory
pipe_class ialu_mem_reg(memory mem, rRegI src)
%{
  single_instruction;
  mem    : S3(read);
  src    : S5(read);
  D0     : S0;        // big decoder only
  ALU    : S4;        // any alu
  MEM    : S3;
%}

// // Long Store to Memory
// pipe_class ialu_mem_long_reg(memory mem, rRegL src)
// %{
//   instruction_count(2);
//   mem    : S3(read);
//   src    : S5(read);
//   D0     : S0(2);     // big decoder only; twice
//   ALU    : S4(2);     // any 2 alus
//   MEM    : S3(2);     // Both mems
// %}

// Integer Store to Memory
pipe_class ialu_mem_imm(memory mem)
%{
  single_instruction;
  mem    : S3(read);
  D0     : S0;        // big decoder only
  ALU    : S4;        // any alu
  MEM    : S3;
%}

// Integer ALU0 reg-reg operation
pipe_class ialu_reg_reg_alu0(rRegI dst, rRegI src)
%{
  single_instruction;
  dst    : S4(write);
  src    : S3(read);
  D0     : S0;        // Big decoder only
  ALU0   : S3;        // only alu0
%}

// Integer ALU0 reg-mem operation
pipe_class ialu_reg_mem_alu0(rRegI dst, memory mem)
%{
  single_instruction;
  dst    : S5(write);
  mem    : S3(read);
  D0     : S0;        // big decoder only
  ALU0   : S4;        // ALU0 only
  MEM    : S3;        // any mem
%}

// Integer ALU reg-reg operation producing flags
pipe_class ialu_cr_reg_reg(rFlagsReg cr, rRegI src1, rRegI src2)
%{
  single_instruction;
  cr     : S4(write);
  src1   : S3(read);
  src2   : S3(read);
  DECODE : S0;        // any decoder
  ALU    : S3;        // any alu
%}

// Integer ALU reg-imm operation producing flags
pipe_class ialu_cr_reg_imm(rFlagsReg cr, rRegI src1)
%{
  single_instruction;
  cr     : S4(write);
  src1   : S3(read);
  DECODE : S0;        // any decoder
  ALU    : S3;        // any alu
%}

// Integer ALU reg-mem operation producing flags
pipe_class ialu_cr_reg_mem(rFlagsReg cr, rRegI src1, memory src2)
%{
  single_instruction;
  cr     : S4(write);
  src1   : S3(read);
  src2   : S3(read);
  D0     : S0;        // big decoder only
  ALU    : S4;        // any alu
  MEM    : S3;
%}

// Conditional move reg-reg
pipe_class pipe_cmplt( rRegI p, rRegI q, rRegI y)
%{
  instruction_count(4);
  y      : S4(read);
  q      : S3(read);
  p      : S3(read);
  DECODE : S0(4);     // any decoder
%}

// Conditional move reg-reg
pipe_class pipe_cmov_reg( rRegI dst, rRegI src, rFlagsReg cr)
%{
  single_instruction;
  dst    : S4(write);
  src    : S3(read);
  cr     : S3(read);
  DECODE : S0;        // any decoder
%}

// Conditional move reg-mem
pipe_class pipe_cmov_mem( rFlagsReg cr, rRegI dst, memory src)
%{
  single_instruction;
  dst    : S4(write);
  src    : S3(read);
  cr     : S3(read);
  DECODE : S0;        // any decoder
  MEM    : S3;
%}

// Conditional move reg-reg long
pipe_class pipe_cmov_reg_long( rFlagsReg cr, rRegL dst, rRegL src)
%{
  single_instruction;
  dst    : S4(write);
  src    : S3(read);
  cr     : S3(read);
  DECODE : S0(2);     // any 2 decoders
%}

// XXX
// // Conditional move double reg-reg
// pipe_class pipe_cmovD_reg( rFlagsReg cr, regDPR1 dst, regD src)
// %{
//   single_instruction;
//   dst    : S4(write);
//   src    : S3(read);
//   cr     : S3(read);
//   DECODE : S0;        // any decoder
// %}

// Float reg-reg operation
pipe_class fpu_reg(regD dst)
%{
  instruction_count(2);
  dst    : S3(read);
  DECODE : S0(2);     // any 2 decoders
  FPU    : S3;
%}

// Float reg-reg operation
pipe_class fpu_reg_reg(regD dst, regD src)
%{
  instruction_count(2);
  dst    : S4(write);
  src    : S3(read);
  DECODE : S0(2);     // any 2 decoders
  FPU    : S3;
%}

// Float reg-reg operation
pipe_class fpu_reg_reg_reg(regD dst, regD src1, regD src2)
%{
  instruction_count(3);
  dst    : S4(write);
  src1   : S3(read);
  src2   : S3(read);
  DECODE : S0(3);     // any 3 decoders
  FPU    : S3(2);
%}

// Float reg-reg operation
pipe_class fpu_reg_reg_reg_reg(regD dst, regD src1, regD src2, regD src3)
%{
  instruction_count(4);
  dst    : S4(write);
  src1   : S3(read);
  src2   : S3(read);
  src3   : S3(read);
  DECODE : S0(4);     // any 4 decoders
  FPU    : S3(2);
%}

// Float reg-reg operation
pipe_class fpu_reg_mem_reg_reg(regD dst, memory src1, regD src2, regD src3)
%{
  instruction_count(4);
  dst    : S4(write);
  src1   : S3(read);
  src2   : S3(read);
  src3   : S3(read);
  DECODE : S1(3);     // any 3 decoders
  D0     : S0;        // Big decoder only
  FPU    : S3(2);
  MEM    : S3;
%}

// Float reg-mem operation
pipe_class fpu_reg_mem(regD dst, memory mem)
%{
  instruction_count(2);
  dst    : S5(write);
  mem    : S3(read);
  D0     : S0;        // big decoder only
  DECODE : S1;        // any decoder for FPU POP
  FPU    : S4;
  MEM    : S3;        // any mem
%}

// Float reg-mem operation
pipe_class fpu_reg_reg_mem(regD dst, regD src1, memory mem)
%{
  instruction_count(3);
  dst    : S5(write);
  src1   : S3(read);
  mem    : S3(read);
  D0     : S0;        // big decoder only
  DECODE : S1(2);     // any decoder for FPU POP
  FPU    : S4;
  MEM    : S3;        // any mem
%}

// Float mem-reg operation
pipe_class fpu_mem_reg(memory mem, regD src)
%{
  instruction_count(2);
  src    : S5(read);
  mem    : S3(read);
  DECODE : S0;        // any decoder for FPU PUSH
  D0     : S1;        // big decoder only
  FPU    : S4;
  MEM    : S3;        // any mem
%}

pipe_class fpu_mem_reg_reg(memory mem, regD src1, regD src2)
%{
  instruction_count(3);
  src1   : S3(read);
  src2   : S3(read);
  mem    : S3(read);
  DECODE : S0(2);     // any decoder for FPU PUSH
  D0     : S1;        // big decoder only
  FPU    : S4;
  MEM    : S3;        // any mem
%}

pipe_class fpu_mem_reg_mem(memory mem, regD src1, memory src2)
%{
  instruction_count(3);
  src1   : S3(read);
  src2   : S3(read);
  mem    : S4(read);
  DECODE : S0;        // any decoder for FPU PUSH
  D0     : S0(2);     // big decoder only
  FPU    : S4;
  MEM    : S3(2);     // any mem
%}

pipe_class fpu_mem_mem(memory dst, memory src1)
%{
  instruction_count(2);
  src1   : S3(read);
  dst    : S4(read);
  D0     : S0(2);     // big decoder only
  MEM    : S3(2);     // any mem
%}

pipe_class fpu_mem_mem_mem(memory dst, memory src1, memory src2)
%{
  instruction_count(3);
  src1   : S3(read);
  src2   : S3(read);
  dst    : S4(read);
  D0     : S0(3); //
big decoder only 5658 FPU : S4; 5659 MEM : S3(3); // any mem 5660 %} 5661 5662 pipe_class fpu_mem_reg_con(memory mem, regD src1) 5663 %{ 5664 instruction_count(3); 5665 src1 : S4(read); 5666 mem : S4(read); 5667 DECODE : S0; // any decoder for FPU PUSH 5668 D0 : S0(2); // big decoder only 5669 FPU : S4; 5670 MEM : S3(2); // any mem 5671 %} 5672 5673 // Float load constant 5674 pipe_class fpu_reg_con(regD dst) 5675 %{ 5676 instruction_count(2); 5677 dst : S5(write); 5678 D0 : S0; // big decoder only for the load 5679 DECODE : S1; // any decoder for FPU POP 5680 FPU : S4; 5681 MEM : S3; // any mem 5682 %} 5683 5684 // Float load constant 5685 pipe_class fpu_reg_reg_con(regD dst, regD src) 5686 %{ 5687 instruction_count(3); 5688 dst : S5(write); 5689 src : S3(read); 5690 D0 : S0; // big decoder only for the load 5691 DECODE : S1(2); // any decoder for FPU POP 5692 FPU : S4; 5693 MEM : S3; // any mem 5694 %} 5695 5696 // UnConditional branch 5697 pipe_class pipe_jmp(label labl) 5698 %{ 5699 single_instruction; 5700 BR : S3; 5701 %} 5702 5703 // Conditional branch 5704 pipe_class pipe_jcc(cmpOp cmp, rFlagsReg cr, label labl) 5705 %{ 5706 single_instruction; 5707 cr : S1(read); 5708 BR : S3; 5709 %} 5710 5711 // Allocation idiom 5712 pipe_class pipe_cmpxchg(rRegP dst, rRegP heap_ptr) 5713 %{ 5714 instruction_count(1); force_serialization; 5715 fixed_latency(6); 5716 heap_ptr : S3(read); 5717 DECODE : S0(3); 5718 D0 : S2; 5719 MEM : S3; 5720 ALU : S3(2); 5721 dst : S5(write); 5722 BR : S5; 5723 %} 5724 5725 // Generic big/slow expanded idiom 5726 pipe_class pipe_slow() 5727 %{ 5728 instruction_count(10); multiple_bundles; force_serialization; 5729 fixed_latency(100); 5730 D0 : S0(2); 5731 MEM : S3(2); 5732 %} 5733 5734 // The real do-nothing guy 5735 pipe_class empty() 5736 %{ 5737 instruction_count(0); 5738 %} 5739 5740 // Define the class for the Nop node 5741 define 5742 %{ 5743 MachNop = empty; 5744 %} 5745 5746 %} 5747 5748 
//----------INSTRUCTIONS-------------------------------------------------------
//
// match      -- States which machine-independent subtree may be replaced
//               by this instruction.
// ins_cost   -- The estimated cost of this instruction is used by instruction
//               selection to identify a minimum cost tree of machine
//               instructions that matches a tree of machine-independent
//               instructions.
// format     -- A string providing the disassembly for this instruction.
//               The value of an instruction's operand may be inserted
//               by referring to it with a '$' prefix.
// opcode     -- Three instruction opcodes may be provided.  These are referred
//               to within an encode class as $primary, $secondary, and
//               $tertiary respectively.  The primary opcode is commonly used
//               to indicate the type of machine instruction, while secondary
//               and tertiary are often used for prefix options or addressing
//               modes.
// ins_encode -- A list of encode classes with parameters.  The encode class
//               name must have been defined in an 'enc_class' specification
//               in the encode section of the architecture description.
5768 5769 5770 //----------Load/Store/Move Instructions--------------------------------------- 5771 //----------Load Instructions-------------------------------------------------- 5772 5773 // Load Byte (8 bit signed) 5774 instruct loadB(rRegI dst, memory mem) 5775 %{ 5776 match(Set dst (LoadB mem)); 5777 5778 ins_cost(125); 5779 format %{ "movsbl $dst, $mem\t# byte" %} 5780 5781 ins_encode %{ 5782 __ movsbl($dst$$Register, $mem$$Address); 5783 %} 5784 5785 ins_pipe(ialu_reg_mem); 5786 %} 5787 5788 // Load Byte (8 bit signed) into Long Register 5789 instruct loadB2L(rRegL dst, memory mem) 5790 %{ 5791 match(Set dst (ConvI2L (LoadB mem))); 5792 5793 ins_cost(125); 5794 format %{ "movsbq $dst, $mem\t# byte -> long" %} 5795 5796 ins_encode %{ 5797 __ movsbq($dst$$Register, $mem$$Address); 5798 %} 5799 5800 ins_pipe(ialu_reg_mem); 5801 %} 5802 5803 // Load Unsigned Byte (8 bit UNsigned) 5804 instruct loadUB(rRegI dst, memory mem) 5805 %{ 5806 match(Set dst (LoadUB mem)); 5807 5808 ins_cost(125); 5809 format %{ "movzbl $dst, $mem\t# ubyte" %} 5810 5811 ins_encode %{ 5812 __ movzbl($dst$$Register, $mem$$Address); 5813 %} 5814 5815 ins_pipe(ialu_reg_mem); 5816 %} 5817 5818 // Load Unsigned Byte (8 bit UNsigned) into Long Register 5819 instruct loadUB2L(rRegL dst, memory mem) 5820 %{ 5821 match(Set dst (ConvI2L (LoadUB mem))); 5822 5823 ins_cost(125); 5824 format %{ "movzbq $dst, $mem\t# ubyte -> long" %} 5825 5826 ins_encode %{ 5827 __ movzbq($dst$$Register, $mem$$Address); 5828 %} 5829 5830 ins_pipe(ialu_reg_mem); 5831 %} 5832 5833 // Load Unsigned Byte (8 bit UNsigned) with a 8-bit mask into Long Register 5834 instruct loadUB2L_immI8(rRegL dst, memory mem, immI8 mask, rFlagsReg cr) %{ 5835 match(Set dst (ConvI2L (AndI (LoadUB mem) mask))); 5836 effect(KILL cr); 5837 5838 format %{ "movzbq $dst, $mem\t# ubyte & 8-bit mask -> long\n\t" 5839 "andl $dst, $mask" %} 5840 ins_encode %{ 5841 Register Rdst = $dst$$Register; 5842 __ movzbq(Rdst, $mem$$Address); 5843 __ andl(Rdst, 
$mask$$constant); 5844 %} 5845 ins_pipe(ialu_reg_mem); 5846 %} 5847 5848 // Load Short (16 bit signed) 5849 instruct loadS(rRegI dst, memory mem) 5850 %{ 5851 match(Set dst (LoadS mem)); 5852 5853 ins_cost(125); 5854 format %{ "movswl $dst, $mem\t# short" %} 5855 5856 ins_encode %{ 5857 __ movswl($dst$$Register, $mem$$Address); 5858 %} 5859 5860 ins_pipe(ialu_reg_mem); 5861 %} 5862 5863 // Load Short (16 bit signed) to Byte (8 bit signed) 5864 instruct loadS2B(rRegI dst, memory mem, immI_24 twentyfour) %{ 5865 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour)); 5866 5867 ins_cost(125); 5868 format %{ "movsbl $dst, $mem\t# short -> byte" %} 5869 ins_encode %{ 5870 __ movsbl($dst$$Register, $mem$$Address); 5871 %} 5872 ins_pipe(ialu_reg_mem); 5873 %} 5874 5875 // Load Short (16 bit signed) into Long Register 5876 instruct loadS2L(rRegL dst, memory mem) 5877 %{ 5878 match(Set dst (ConvI2L (LoadS mem))); 5879 5880 ins_cost(125); 5881 format %{ "movswq $dst, $mem\t# short -> long" %} 5882 5883 ins_encode %{ 5884 __ movswq($dst$$Register, $mem$$Address); 5885 %} 5886 5887 ins_pipe(ialu_reg_mem); 5888 %} 5889 5890 // Load Unsigned Short/Char (16 bit UNsigned) 5891 instruct loadUS(rRegI dst, memory mem) 5892 %{ 5893 match(Set dst (LoadUS mem)); 5894 5895 ins_cost(125); 5896 format %{ "movzwl $dst, $mem\t# ushort/char" %} 5897 5898 ins_encode %{ 5899 __ movzwl($dst$$Register, $mem$$Address); 5900 %} 5901 5902 ins_pipe(ialu_reg_mem); 5903 %} 5904 5905 // Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed) 5906 instruct loadUS2B(rRegI dst, memory mem, immI_24 twentyfour) %{ 5907 match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour)); 5908 5909 ins_cost(125); 5910 format %{ "movsbl $dst, $mem\t# ushort -> byte" %} 5911 ins_encode %{ 5912 __ movsbl($dst$$Register, $mem$$Address); 5913 %} 5914 ins_pipe(ialu_reg_mem); 5915 %} 5916 5917 // Load Unsigned Short/Char (16 bit UNsigned) into Long Register 5918 instruct loadUS2L(rRegL dst, 
memory mem) 5919 %{ 5920 match(Set dst (ConvI2L (LoadUS mem))); 5921 5922 ins_cost(125); 5923 format %{ "movzwq $dst, $mem\t# ushort/char -> long" %} 5924 5925 ins_encode %{ 5926 __ movzwq($dst$$Register, $mem$$Address); 5927 %} 5928 5929 ins_pipe(ialu_reg_mem); 5930 %} 5931 5932 // Load Unsigned Short/Char (16 bit UNsigned) with mask 0xFF into Long Register 5933 instruct loadUS2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{ 5934 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5935 5936 format %{ "movzbq $dst, $mem\t# ushort/char & 0xFF -> long" %} 5937 ins_encode %{ 5938 __ movzbq($dst$$Register, $mem$$Address); 5939 %} 5940 ins_pipe(ialu_reg_mem); 5941 %} 5942 5943 // Load Unsigned Short/Char (16 bit UNsigned) with mask into Long Register 5944 instruct loadUS2L_immI16(rRegL dst, memory mem, immI16 mask, rFlagsReg cr) %{ 5945 match(Set dst (ConvI2L (AndI (LoadUS mem) mask))); 5946 effect(KILL cr); 5947 5948 format %{ "movzwq $dst, $mem\t# ushort/char & 16-bit mask -> long\n\t" 5949 "andl $dst, $mask" %} 5950 ins_encode %{ 5951 Register Rdst = $dst$$Register; 5952 __ movzwq(Rdst, $mem$$Address); 5953 __ andl(Rdst, $mask$$constant); 5954 %} 5955 ins_pipe(ialu_reg_mem); 5956 %} 5957 5958 // Load Integer 5959 instruct loadI(rRegI dst, memory mem) 5960 %{ 5961 match(Set dst (LoadI mem)); 5962 5963 ins_cost(125); 5964 format %{ "movl $dst, $mem\t# int" %} 5965 5966 ins_encode %{ 5967 __ movl($dst$$Register, $mem$$Address); 5968 %} 5969 5970 ins_pipe(ialu_reg_mem); 5971 %} 5972 5973 // Load Integer (32 bit signed) to Byte (8 bit signed) 5974 instruct loadI2B(rRegI dst, memory mem, immI_24 twentyfour) %{ 5975 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour)); 5976 5977 ins_cost(125); 5978 format %{ "movsbl $dst, $mem\t# int -> byte" %} 5979 ins_encode %{ 5980 __ movsbl($dst$$Register, $mem$$Address); 5981 %} 5982 ins_pipe(ialu_reg_mem); 5983 %} 5984 5985 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned) 5986 instruct 
loadI2UB(rRegI dst, memory mem, immI_255 mask) %{ 5987 match(Set dst (AndI (LoadI mem) mask)); 5988 5989 ins_cost(125); 5990 format %{ "movzbl $dst, $mem\t# int -> ubyte" %} 5991 ins_encode %{ 5992 __ movzbl($dst$$Register, $mem$$Address); 5993 %} 5994 ins_pipe(ialu_reg_mem); 5995 %} 5996 5997 // Load Integer (32 bit signed) to Short (16 bit signed) 5998 instruct loadI2S(rRegI dst, memory mem, immI_16 sixteen) %{ 5999 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen)); 6000 6001 ins_cost(125); 6002 format %{ "movswl $dst, $mem\t# int -> short" %} 6003 ins_encode %{ 6004 __ movswl($dst$$Register, $mem$$Address); 6005 %} 6006 ins_pipe(ialu_reg_mem); 6007 %} 6008 6009 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned) 6010 instruct loadI2US(rRegI dst, memory mem, immI_65535 mask) %{ 6011 match(Set dst (AndI (LoadI mem) mask)); 6012 6013 ins_cost(125); 6014 format %{ "movzwl $dst, $mem\t# int -> ushort/char" %} 6015 ins_encode %{ 6016 __ movzwl($dst$$Register, $mem$$Address); 6017 %} 6018 ins_pipe(ialu_reg_mem); 6019 %} 6020 6021 // Load Integer into Long Register 6022 instruct loadI2L(rRegL dst, memory mem) 6023 %{ 6024 match(Set dst (ConvI2L (LoadI mem))); 6025 6026 ins_cost(125); 6027 format %{ "movslq $dst, $mem\t# int -> long" %} 6028 6029 ins_encode %{ 6030 __ movslq($dst$$Register, $mem$$Address); 6031 %} 6032 6033 ins_pipe(ialu_reg_mem); 6034 %} 6035 6036 // Load Integer with mask 0xFF into Long Register 6037 instruct loadI2L_immI_255(rRegL dst, memory mem, immI_255 mask) %{ 6038 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 6039 6040 format %{ "movzbq $dst, $mem\t# int & 0xFF -> long" %} 6041 ins_encode %{ 6042 __ movzbq($dst$$Register, $mem$$Address); 6043 %} 6044 ins_pipe(ialu_reg_mem); 6045 %} 6046 6047 // Load Integer with mask 0xFFFF into Long Register 6048 instruct loadI2L_immI_65535(rRegL dst, memory mem, immI_65535 mask) %{ 6049 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 6050 6051 format %{ "movzwq $dst, 
$mem\t# int & 0xFFFF -> long" %} 6052 ins_encode %{ 6053 __ movzwq($dst$$Register, $mem$$Address); 6054 %} 6055 ins_pipe(ialu_reg_mem); 6056 %} 6057 6058 // Load Integer with a 32-bit mask into Long Register 6059 instruct loadI2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{ 6060 match(Set dst (ConvI2L (AndI (LoadI mem) mask))); 6061 effect(KILL cr); 6062 6063 format %{ "movl $dst, $mem\t# int & 32-bit mask -> long\n\t" 6064 "andl $dst, $mask" %} 6065 ins_encode %{ 6066 Register Rdst = $dst$$Register; 6067 __ movl(Rdst, $mem$$Address); 6068 __ andl(Rdst, $mask$$constant); 6069 %} 6070 ins_pipe(ialu_reg_mem); 6071 %} 6072 6073 // Load Unsigned Integer into Long Register 6074 instruct loadUI2L(rRegL dst, memory mem) 6075 %{ 6076 match(Set dst (LoadUI2L mem)); 6077 6078 ins_cost(125); 6079 format %{ "movl $dst, $mem\t# uint -> long" %} 6080 6081 ins_encode %{ 6082 __ movl($dst$$Register, $mem$$Address); 6083 %} 6084 6085 ins_pipe(ialu_reg_mem); 6086 %} 6087 6088 // Load Long 6089 instruct loadL(rRegL dst, memory mem) 6090 %{ 6091 match(Set dst (LoadL mem)); 6092 6093 ins_cost(125); 6094 format %{ "movq $dst, $mem\t# long" %} 6095 6096 ins_encode %{ 6097 __ movq($dst$$Register, $mem$$Address); 6098 %} 6099 6100 ins_pipe(ialu_reg_mem); // XXX 6101 %} 6102 6103 // Load Range 6104 instruct loadRange(rRegI dst, memory mem) 6105 %{ 6106 match(Set dst (LoadRange mem)); 6107 6108 ins_cost(125); // XXX 6109 format %{ "movl $dst, $mem\t# range" %} 6110 opcode(0x8B); 6111 ins_encode(REX_reg_mem(dst, mem), OpcP, reg_mem(dst, mem)); 6112 ins_pipe(ialu_reg_mem); 6113 %} 6114 6115 // Load Pointer 6116 instruct loadP(rRegP dst, memory mem) 6117 %{ 6118 match(Set dst (LoadP mem)); 6119 6120 ins_cost(125); // XXX 6121 format %{ "movq $dst, $mem\t# ptr" %} 6122 opcode(0x8B); 6123 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6124 ins_pipe(ialu_reg_mem); // XXX 6125 %} 6126 6127 // Load Compressed Pointer 6128 instruct loadN(rRegN dst, memory mem) 6129 %{ 
6130 match(Set dst (LoadN mem)); 6131 6132 ins_cost(125); // XXX 6133 format %{ "movl $dst, $mem\t# compressed ptr" %} 6134 ins_encode %{ 6135 __ movl($dst$$Register, $mem$$Address); 6136 %} 6137 ins_pipe(ialu_reg_mem); // XXX 6138 %} 6139 6140 6141 // Load Klass Pointer 6142 instruct loadKlass(rRegP dst, memory mem) 6143 %{ 6144 match(Set dst (LoadKlass mem)); 6145 6146 ins_cost(125); // XXX 6147 format %{ "movq $dst, $mem\t# class" %} 6148 opcode(0x8B); 6149 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6150 ins_pipe(ialu_reg_mem); // XXX 6151 %} 6152 6153 // Load narrow Klass Pointer 6154 instruct loadNKlass(rRegN dst, memory mem) 6155 %{ 6156 match(Set dst (LoadNKlass mem)); 6157 6158 ins_cost(125); // XXX 6159 format %{ "movl $dst, $mem\t# compressed klass ptr" %} 6160 ins_encode %{ 6161 __ movl($dst$$Register, $mem$$Address); 6162 %} 6163 ins_pipe(ialu_reg_mem); // XXX 6164 %} 6165 6166 // Load Float 6167 instruct loadF(regF dst, memory mem) 6168 %{ 6169 match(Set dst (LoadF mem)); 6170 6171 ins_cost(145); // XXX 6172 format %{ "movss $dst, $mem\t# float" %} 6173 opcode(0xF3, 0x0F, 0x10); 6174 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem)); 6175 ins_pipe(pipe_slow); // XXX 6176 %} 6177 6178 // Load Double 6179 instruct loadD_partial(regD dst, memory mem) 6180 %{ 6181 predicate(!UseXmmLoadAndClearUpper); 6182 match(Set dst (LoadD mem)); 6183 6184 ins_cost(145); // XXX 6185 format %{ "movlpd $dst, $mem\t# double" %} 6186 opcode(0x66, 0x0F, 0x12); 6187 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem)); 6188 ins_pipe(pipe_slow); // XXX 6189 %} 6190 6191 instruct loadD(regD dst, memory mem) 6192 %{ 6193 predicate(UseXmmLoadAndClearUpper); 6194 match(Set dst (LoadD mem)); 6195 6196 ins_cost(145); // XXX 6197 format %{ "movsd $dst, $mem\t# double" %} 6198 opcode(0xF2, 0x0F, 0x10); 6199 ins_encode(OpcP, REX_reg_mem(dst, mem), OpcS, OpcT, reg_mem(dst, mem)); 6200 ins_pipe(pipe_slow); // XXX 6201 %} 6202 
6203 // Load Aligned Packed Byte to XMM register 6204 instruct loadA8B(regD dst, memory mem) %{ 6205 match(Set dst (Load8B mem)); 6206 ins_cost(125); 6207 format %{ "MOVQ $dst,$mem\t! packed8B" %} 6208 ins_encode( movq_ld(dst, mem)); 6209 ins_pipe( pipe_slow ); 6210 %} 6211 6212 // Load Aligned Packed Short to XMM register 6213 instruct loadA4S(regD dst, memory mem) %{ 6214 match(Set dst (Load4S mem)); 6215 ins_cost(125); 6216 format %{ "MOVQ $dst,$mem\t! packed4S" %} 6217 ins_encode( movq_ld(dst, mem)); 6218 ins_pipe( pipe_slow ); 6219 %} 6220 6221 // Load Aligned Packed Char to XMM register 6222 instruct loadA4C(regD dst, memory mem) %{ 6223 match(Set dst (Load4C mem)); 6224 ins_cost(125); 6225 format %{ "MOVQ $dst,$mem\t! packed4C" %} 6226 ins_encode( movq_ld(dst, mem)); 6227 ins_pipe( pipe_slow ); 6228 %} 6229 6230 // Load Aligned Packed Integer to XMM register 6231 instruct load2IU(regD dst, memory mem) %{ 6232 match(Set dst (Load2I mem)); 6233 ins_cost(125); 6234 format %{ "MOVQ $dst,$mem\t! packed2I" %} 6235 ins_encode( movq_ld(dst, mem)); 6236 ins_pipe( pipe_slow ); 6237 %} 6238 6239 // Load Aligned Packed Single to XMM 6240 instruct loadA2F(regD dst, memory mem) %{ 6241 match(Set dst (Load2F mem)); 6242 ins_cost(145); 6243 format %{ "MOVQ $dst,$mem\t! 
packed2F" %} 6244 ins_encode( movq_ld(dst, mem)); 6245 ins_pipe( pipe_slow ); 6246 %} 6247 6248 // Load Effective Address 6249 instruct leaP8(rRegP dst, indOffset8 mem) 6250 %{ 6251 match(Set dst mem); 6252 6253 ins_cost(110); // XXX 6254 format %{ "leaq $dst, $mem\t# ptr 8" %} 6255 opcode(0x8D); 6256 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6257 ins_pipe(ialu_reg_reg_fat); 6258 %} 6259 6260 instruct leaP32(rRegP dst, indOffset32 mem) 6261 %{ 6262 match(Set dst mem); 6263 6264 ins_cost(110); 6265 format %{ "leaq $dst, $mem\t# ptr 32" %} 6266 opcode(0x8D); 6267 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6268 ins_pipe(ialu_reg_reg_fat); 6269 %} 6270 6271 // instruct leaPIdx(rRegP dst, indIndex mem) 6272 // %{ 6273 // match(Set dst mem); 6274 6275 // ins_cost(110); 6276 // format %{ "leaq $dst, $mem\t# ptr idx" %} 6277 // opcode(0x8D); 6278 // ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6279 // ins_pipe(ialu_reg_reg_fat); 6280 // %} 6281 6282 instruct leaPIdxOff(rRegP dst, indIndexOffset mem) 6283 %{ 6284 match(Set dst mem); 6285 6286 ins_cost(110); 6287 format %{ "leaq $dst, $mem\t# ptr idxoff" %} 6288 opcode(0x8D); 6289 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6290 ins_pipe(ialu_reg_reg_fat); 6291 %} 6292 6293 instruct leaPIdxScale(rRegP dst, indIndexScale mem) 6294 %{ 6295 match(Set dst mem); 6296 6297 ins_cost(110); 6298 format %{ "leaq $dst, $mem\t# ptr idxscale" %} 6299 opcode(0x8D); 6300 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6301 ins_pipe(ialu_reg_reg_fat); 6302 %} 6303 6304 instruct leaPIdxScaleOff(rRegP dst, indIndexScaleOffset mem) 6305 %{ 6306 match(Set dst mem); 6307 6308 ins_cost(110); 6309 format %{ "leaq $dst, $mem\t# ptr idxscaleoff" %} 6310 opcode(0x8D); 6311 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6312 ins_pipe(ialu_reg_reg_fat); 6313 %} 6314 6315 instruct leaPPosIdxScaleOff(rRegP dst, indPosIndexScaleOffset 
mem) 6316 %{ 6317 match(Set dst mem); 6318 6319 ins_cost(110); 6320 format %{ "leaq $dst, $mem\t# ptr posidxscaleoff" %} 6321 opcode(0x8D); 6322 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6323 ins_pipe(ialu_reg_reg_fat); 6324 %} 6325 6326 // Load Effective Address which uses Narrow (32-bits) oop 6327 instruct leaPCompressedOopOffset(rRegP dst, indCompressedOopOffset mem) 6328 %{ 6329 predicate(UseCompressedOops && (Universe::narrow_oop_shift() != 0)); 6330 match(Set dst mem); 6331 6332 ins_cost(110); 6333 format %{ "leaq $dst, $mem\t# ptr compressedoopoff32" %} 6334 opcode(0x8D); 6335 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6336 ins_pipe(ialu_reg_reg_fat); 6337 %} 6338 6339 instruct leaP8Narrow(rRegP dst, indOffset8Narrow mem) 6340 %{ 6341 predicate(Universe::narrow_oop_shift() == 0); 6342 match(Set dst mem); 6343 6344 ins_cost(110); // XXX 6345 format %{ "leaq $dst, $mem\t# ptr off8narrow" %} 6346 opcode(0x8D); 6347 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6348 ins_pipe(ialu_reg_reg_fat); 6349 %} 6350 6351 instruct leaP32Narrow(rRegP dst, indOffset32Narrow mem) 6352 %{ 6353 predicate(Universe::narrow_oop_shift() == 0); 6354 match(Set dst mem); 6355 6356 ins_cost(110); 6357 format %{ "leaq $dst, $mem\t# ptr off32narrow" %} 6358 opcode(0x8D); 6359 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6360 ins_pipe(ialu_reg_reg_fat); 6361 %} 6362 6363 instruct leaPIdxOffNarrow(rRegP dst, indIndexOffsetNarrow mem) 6364 %{ 6365 predicate(Universe::narrow_oop_shift() == 0); 6366 match(Set dst mem); 6367 6368 ins_cost(110); 6369 format %{ "leaq $dst, $mem\t# ptr idxoffnarrow" %} 6370 opcode(0x8D); 6371 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6372 ins_pipe(ialu_reg_reg_fat); 6373 %} 6374 6375 instruct leaPIdxScaleNarrow(rRegP dst, indIndexScaleNarrow mem) 6376 %{ 6377 predicate(Universe::narrow_oop_shift() == 0); 6378 match(Set dst mem); 6379 6380 ins_cost(110); 6381 
format %{ "leaq $dst, $mem\t# ptr idxscalenarrow" %} 6382 opcode(0x8D); 6383 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6384 ins_pipe(ialu_reg_reg_fat); 6385 %} 6386 6387 instruct leaPIdxScaleOffNarrow(rRegP dst, indIndexScaleOffsetNarrow mem) 6388 %{ 6389 predicate(Universe::narrow_oop_shift() == 0); 6390 match(Set dst mem); 6391 6392 ins_cost(110); 6393 format %{ "leaq $dst, $mem\t# ptr idxscaleoffnarrow" %} 6394 opcode(0x8D); 6395 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6396 ins_pipe(ialu_reg_reg_fat); 6397 %} 6398 6399 instruct leaPPosIdxScaleOffNarrow(rRegP dst, indPosIndexScaleOffsetNarrow mem) 6400 %{ 6401 predicate(Universe::narrow_oop_shift() == 0); 6402 match(Set dst mem); 6403 6404 ins_cost(110); 6405 format %{ "leaq $dst, $mem\t# ptr posidxscaleoffnarrow" %} 6406 opcode(0x8D); 6407 ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem)); 6408 ins_pipe(ialu_reg_reg_fat); 6409 %} 6410 6411 instruct loadConI(rRegI dst, immI src) 6412 %{ 6413 match(Set dst src); 6414 6415 format %{ "movl $dst, $src\t# int" %} 6416 ins_encode(load_immI(dst, src)); 6417 ins_pipe(ialu_reg_fat); // XXX 6418 %} 6419 6420 instruct loadConI0(rRegI dst, immI0 src, rFlagsReg cr) 6421 %{ 6422 match(Set dst src); 6423 effect(KILL cr); 6424 6425 ins_cost(50); 6426 format %{ "xorl $dst, $dst\t# int" %} 6427 opcode(0x33); /* + rd */ 6428 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst)); 6429 ins_pipe(ialu_reg); 6430 %} 6431 6432 instruct loadConL(rRegL dst, immL src) 6433 %{ 6434 match(Set dst src); 6435 6436 ins_cost(150); 6437 format %{ "movq $dst, $src\t# long" %} 6438 ins_encode(load_immL(dst, src)); 6439 ins_pipe(ialu_reg); 6440 %} 6441 6442 instruct loadConL0(rRegL dst, immL0 src, rFlagsReg cr) 6443 %{ 6444 match(Set dst src); 6445 effect(KILL cr); 6446 6447 ins_cost(50); 6448 format %{ "xorl $dst, $dst\t# long" %} 6449 opcode(0x33); /* + rd */ 6450 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst)); 6451 
ins_pipe(ialu_reg); // XXX 6452 %} 6453 6454 instruct loadConUL32(rRegL dst, immUL32 src) 6455 %{ 6456 match(Set dst src); 6457 6458 ins_cost(60); 6459 format %{ "movl $dst, $src\t# long (unsigned 32-bit)" %} 6460 ins_encode(load_immUL32(dst, src)); 6461 ins_pipe(ialu_reg); 6462 %} 6463 6464 instruct loadConL32(rRegL dst, immL32 src) 6465 %{ 6466 match(Set dst src); 6467 6468 ins_cost(70); 6469 format %{ "movq $dst, $src\t# long (32-bit)" %} 6470 ins_encode(load_immL32(dst, src)); 6471 ins_pipe(ialu_reg); 6472 %} 6473 6474 instruct loadConP(rRegP dst, immP con) %{ 6475 match(Set dst con); 6476 6477 format %{ "movq $dst, $con\t# ptr" %} 6478 ins_encode(load_immP(dst, con)); 6479 ins_pipe(ialu_reg_fat); // XXX 6480 %} 6481 6482 instruct loadConP0(rRegP dst, immP0 src, rFlagsReg cr) 6483 %{ 6484 match(Set dst src); 6485 effect(KILL cr); 6486 6487 ins_cost(50); 6488 format %{ "xorl $dst, $dst\t# ptr" %} 6489 opcode(0x33); /* + rd */ 6490 ins_encode(REX_reg_reg(dst, dst), OpcP, reg_reg(dst, dst)); 6491 ins_pipe(ialu_reg); 6492 %} 6493 6494 instruct loadConP_poll(rRegP dst, immP_poll src) %{ 6495 match(Set dst src); 6496 format %{ "movq $dst, $src\t!ptr" %} 6497 ins_encode %{ 6498 AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_type); 6499 __ lea($dst$$Register, polling_page); 6500 %} 6501 ins_pipe(ialu_reg_fat); 6502 %} 6503 6504 instruct loadConP31(rRegP dst, immP31 src, rFlagsReg cr) 6505 %{ 6506 match(Set dst src); 6507 effect(KILL cr); 6508 6509 ins_cost(60); 6510 format %{ "movl $dst, $src\t# ptr (positive 32-bit)" %} 6511 ins_encode(load_immP31(dst, src)); 6512 ins_pipe(ialu_reg); 6513 %} 6514 6515 instruct loadConF(regF dst, immF con) %{ 6516 match(Set dst con); 6517 ins_cost(125); 6518 format %{ "movss $dst, [$constantaddress]\t# load from constant table: float=$con" %} 6519 ins_encode %{ 6520 __ movflt($dst$$XMMRegister, $constantaddress($con)); 6521 %} 6522 ins_pipe(pipe_slow); 6523 %} 6524 6525 instruct loadConN0(rRegN dst, immN0 src, 
rFlagsReg cr) %{ 6526 match(Set dst src); 6527 effect(KILL cr); 6528 format %{ "xorq $dst, $src\t# compressed NULL ptr" %} 6529 ins_encode %{ 6530 __ xorq($dst$$Register, $dst$$Register); 6531 %} 6532 ins_pipe(ialu_reg); 6533 %} 6534 6535 instruct loadConN(rRegN dst, immN src) %{ 6536 match(Set dst src); 6537 6538 ins_cost(125); 6539 format %{ "movl $dst, $src\t# compressed ptr" %} 6540 ins_encode %{ 6541 address con = (address)$src$$constant; 6542 if (con == NULL) { 6543 ShouldNotReachHere(); 6544 } else { 6545 __ set_narrow_oop($dst$$Register, (jobject)$src$$constant); 6546 } 6547 %} 6548 ins_pipe(ialu_reg_fat); // XXX 6549 %} 6550 6551 instruct loadConF0(regF dst, immF0 src) 6552 %{ 6553 match(Set dst src); 6554 ins_cost(100); 6555 6556 format %{ "xorps $dst, $dst\t# float 0.0" %} 6557 opcode(0x0F, 0x57); 6558 ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst)); 6559 ins_pipe(pipe_slow); 6560 %} 6561 6562 // Use the same format since predicate() can not be used here. 6563 instruct loadConD(regD dst, immD con) %{ 6564 match(Set dst con); 6565 ins_cost(125); 6566 format %{ "movsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} 6567 ins_encode %{ 6568 __ movdbl($dst$$XMMRegister, $constantaddress($con)); 6569 %} 6570 ins_pipe(pipe_slow); 6571 %} 6572 6573 instruct loadConD0(regD dst, immD0 src) 6574 %{ 6575 match(Set dst src); 6576 ins_cost(100); 6577 6578 format %{ "xorpd $dst, $dst\t# double 0.0" %} 6579 opcode(0x66, 0x0F, 0x57); 6580 ins_encode(OpcP, REX_reg_reg(dst, dst), OpcS, OpcT, reg_reg(dst, dst)); 6581 ins_pipe(pipe_slow); 6582 %} 6583 6584 instruct loadSSI(rRegI dst, stackSlotI src) 6585 %{ 6586 match(Set dst src); 6587 6588 ins_cost(125); 6589 format %{ "movl $dst, $src\t# int stk" %} 6590 opcode(0x8B); 6591 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src)); 6592 ins_pipe(ialu_reg_mem); 6593 %} 6594 6595 instruct loadSSL(rRegL dst, stackSlotL src) 6596 %{ 6597 match(Set dst src); 6598 6599 ins_cost(125); 6600 
format %{ "movq $dst, $src\t# long stk" %} 6601 opcode(0x8B); 6602 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src)); 6603 ins_pipe(ialu_reg_mem); 6604 %} 6605 6606 instruct loadSSP(rRegP dst, stackSlotP src) 6607 %{ 6608 match(Set dst src); 6609 6610 ins_cost(125); 6611 format %{ "movq $dst, $src\t# ptr stk" %} 6612 opcode(0x8B); 6613 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src)); 6614 ins_pipe(ialu_reg_mem); 6615 %} 6616 6617 instruct loadSSF(regF dst, stackSlotF src) 6618 %{ 6619 match(Set dst src); 6620 6621 ins_cost(125); 6622 format %{ "movss $dst, $src\t# float stk" %} 6623 opcode(0xF3, 0x0F, 0x10); 6624 ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src)); 6625 ins_pipe(pipe_slow); // XXX 6626 %} 6627 6628 // Use the same format since predicate() can not be used here. 6629 instruct loadSSD(regD dst, stackSlotD src) 6630 %{ 6631 match(Set dst src); 6632 6633 ins_cost(125); 6634 format %{ "movsd $dst, $src\t# double stk" %} 6635 ins_encode %{ 6636 __ movdbl($dst$$XMMRegister, Address(rsp, $src$$disp)); 6637 %} 6638 ins_pipe(pipe_slow); // XXX 6639 %} 6640 6641 // Prefetch instructions. 6642 // Must be safe to execute with invalid address (cannot fault). 
6643 6644 instruct prefetchr( memory mem ) %{ 6645 predicate(ReadPrefetchInstr==3); 6646 match(PrefetchRead mem); 6647 ins_cost(125); 6648 6649 format %{ "PREFETCHR $mem\t# Prefetch into level 1 cache" %} 6650 opcode(0x0F, 0x0D); /* Opcode 0F 0D /0 */ 6651 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem)); 6652 ins_pipe(ialu_mem); 6653 %} 6654 6655 instruct prefetchrNTA( memory mem ) %{ 6656 predicate(ReadPrefetchInstr==0); 6657 match(PrefetchRead mem); 6658 ins_cost(125); 6659 6660 format %{ "PREFETCHNTA $mem\t# Prefetch into non-temporal cache for read" %} 6661 opcode(0x0F, 0x18); /* Opcode 0F 18 /0 */ 6662 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem)); 6663 ins_pipe(ialu_mem); 6664 %} 6665 6666 instruct prefetchrT0( memory mem ) %{ 6667 predicate(ReadPrefetchInstr==1); 6668 match(PrefetchRead mem); 6669 ins_cost(125); 6670 6671 format %{ "PREFETCHT0 $mem\t# prefetch into L1 and L2 caches for read" %} 6672 opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */ 6673 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem)); 6674 ins_pipe(ialu_mem); 6675 %} 6676 6677 instruct prefetchrT2( memory mem ) %{ 6678 predicate(ReadPrefetchInstr==2); 6679 match(PrefetchRead mem); 6680 ins_cost(125); 6681 6682 format %{ "PREFETCHT2 $mem\t# prefetch into L2 caches for read" %} 6683 opcode(0x0F, 0x18); /* Opcode 0F 18 /3 */ 6684 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem)); 6685 ins_pipe(ialu_mem); 6686 %} 6687 6688 instruct prefetchw( memory mem ) %{ 6689 predicate(AllocatePrefetchInstr==3); 6690 match(PrefetchWrite mem); 6691 ins_cost(125); 6692 6693 format %{ "PREFETCHW $mem\t# Prefetch into level 1 cache and mark modified" %} 6694 opcode(0x0F, 0x0D); /* Opcode 0F 0D /1 */ 6695 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem)); 6696 ins_pipe(ialu_mem); 6697 %} 6698 6699 instruct prefetchwNTA( memory mem ) %{ 6700 predicate(AllocatePrefetchInstr==0); 6701 match(PrefetchWrite mem); 6702 ins_cost(125); 6703 6704 format %{ "PREFETCHNTA 
$mem\t# Prefetch to non-temporal cache for write" %} 6705 opcode(0x0F, 0x18); /* Opcode 0F 18 /0 */ 6706 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x00, mem)); 6707 ins_pipe(ialu_mem); 6708 %} 6709 6710 instruct prefetchwT0( memory mem ) %{ 6711 predicate(AllocatePrefetchInstr==1); 6712 match(PrefetchWrite mem); 6713 ins_cost(125); 6714 6715 format %{ "PREFETCHT0 $mem\t# Prefetch to level 1 and 2 caches for write" %} 6716 opcode(0x0F, 0x18); /* Opcode 0F 18 /1 */ 6717 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x01, mem)); 6718 ins_pipe(ialu_mem); 6719 %} 6720 6721 instruct prefetchwT2( memory mem ) %{ 6722 predicate(AllocatePrefetchInstr==2); 6723 match(PrefetchWrite mem); 6724 ins_cost(125); 6725 6726 format %{ "PREFETCHT2 $mem\t# Prefetch to level 2 cache for write" %} 6727 opcode(0x0F, 0x18); /* Opcode 0F 18 /3 */ 6728 ins_encode(REX_mem(mem), OpcP, OpcS, RM_opc_mem(0x03, mem)); 6729 ins_pipe(ialu_mem); 6730 %} 6731 6732 //----------Store Instructions------------------------------------------------- 6733 6734 // Store Byte 6735 instruct storeB(memory mem, rRegI src) 6736 %{ 6737 match(Set mem (StoreB mem src)); 6738 6739 ins_cost(125); // XXX 6740 format %{ "movb $mem, $src\t# byte" %} 6741 opcode(0x88); 6742 ins_encode(REX_breg_mem(src, mem), OpcP, reg_mem(src, mem)); 6743 ins_pipe(ialu_mem_reg); 6744 %} 6745 6746 // Store Char/Short 6747 instruct storeC(memory mem, rRegI src) 6748 %{ 6749 match(Set mem (StoreC mem src)); 6750 6751 ins_cost(125); // XXX 6752 format %{ "movw $mem, $src\t# char/short" %} 6753 opcode(0x89); 6754 ins_encode(SizePrefix, REX_reg_mem(src, mem), OpcP, reg_mem(src, mem)); 6755 ins_pipe(ialu_mem_reg); 6756 %} 6757 6758 // Store Integer 6759 instruct storeI(memory mem, rRegI src) 6760 %{ 6761 match(Set mem (StoreI mem src)); 6762 6763 ins_cost(125); // XXX 6764 format %{ "movl $mem, $src\t# int" %} 6765 opcode(0x89); 6766 ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem)); 6767 ins_pipe(ialu_mem_reg); 6768 %} 6769 6770 
// Store Long
// REX_reg_mem_wide emits REX.W so the 0x89 MOV operates on 64 bits.
instruct storeL(memory mem, rRegL src)
%{
  match(Set mem (StoreL mem src));

  ins_cost(125); // XXX
  format %{ "movq $mem, $src\t# long" %}
  opcode(0x89);
  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_mem_reg); // XXX
%}

// Store Pointer
instruct storeP(memory mem, any_RegP src)
%{
  match(Set mem (StoreP mem src));

  ins_cost(125); // XXX
  format %{ "movq $mem, $src\t# ptr" %}
  opcode(0x89);
  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_mem_reg);
%}

// Store NULL pointer by storing R12: under the predicate, R12 is the
// compressed-oop heap base and the base is zero, so R12 == 0.  Storing a
// register is cheaper than a movq with an immediate.
instruct storeImmP0(memory mem, immP0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreP mem zero));

  ins_cost(125); // XXX
  format %{ "movq $mem, R12\t# ptr (R12_heapbase==0)" %}
  ins_encode %{
    __ movq($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP(memory mem, immP31 src)
%{
  match(Set mem (StoreP mem src));

  ins_cost(150); // XXX
  format %{ "movq $mem, $src\t# ptr" %}
  opcode(0xC7); /* C7 /0 */
  ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src));
  ins_pipe(ialu_mem_imm);
%}

// Store Compressed Pointer
instruct storeN(memory mem, rRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "movl $mem, $src\t# compressed ptr" %}
  ins_encode %{
    __ movl($mem$$Address, $src$$Register);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Store compressed NULL via R12 (heap base register, zero when base is NULL).
instruct storeImmN0(memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL);
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "movl $mem, R12\t# compressed ptr (R12_heapbase==0)" %}
  ins_encode %{
    __ movl($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Store a compressed oop constant.  NULL becomes a literal 0; non-NULL
// constants go through set_narrow_oop so the oop is recorded for relocation.
instruct storeImmN(memory mem, immN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(150); // XXX
  format %{ "movl $mem, $src\t# compressed ptr" %}
  ins_encode %{
    address con = (address)$src$$constant;
    if (con == NULL) {
      __ movl($mem$$Address, (int32_t)0);
    } else {
      __ set_narrow_oop($mem$$Address, (jobject)$src$$constant);
    }
  %}
  ins_pipe(ialu_mem_imm);
%}

// Store Integer Immediate
// Zero case reuses R12 (known zero under the predicate) — shorter encoding
// than a 32-bit immediate store.
instruct storeImmI0(memory mem, immI0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreI mem zero));

  ins_cost(125); // XXX
  format %{ "movl $mem, R12\t# int (R12_heapbase==0)" %}
  ins_encode %{
    __ movl($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

instruct storeImmI(memory mem, immI src)
%{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "movl $mem, $src\t# int" %}
6882 opcode(0xC7); /* C7 /0 */ 6883 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src)); 6884 ins_pipe(ialu_mem_imm); 6885 %} 6886 6887 // Store Long Immediate 6888 instruct storeImmL0(memory mem, immL0 zero) 6889 %{ 6890 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); 6891 match(Set mem (StoreL mem zero)); 6892 6893 ins_cost(125); // XXX 6894 format %{ "movq $mem, R12\t# long (R12_heapbase==0)" %} 6895 ins_encode %{ 6896 __ movq($mem$$Address, r12); 6897 %} 6898 ins_pipe(ialu_mem_reg); 6899 %} 6900 6901 instruct storeImmL(memory mem, immL32 src) 6902 %{ 6903 match(Set mem (StoreL mem src)); 6904 6905 ins_cost(150); 6906 format %{ "movq $mem, $src\t# long" %} 6907 opcode(0xC7); /* C7 /0 */ 6908 ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32(src)); 6909 ins_pipe(ialu_mem_imm); 6910 %} 6911 6912 // Store Short/Char Immediate 6913 instruct storeImmC0(memory mem, immI0 zero) 6914 %{ 6915 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); 6916 match(Set mem (StoreC mem zero)); 6917 6918 ins_cost(125); // XXX 6919 format %{ "movw $mem, R12\t# short/char (R12_heapbase==0)" %} 6920 ins_encode %{ 6921 __ movw($mem$$Address, r12); 6922 %} 6923 ins_pipe(ialu_mem_reg); 6924 %} 6925 6926 instruct storeImmI16(memory mem, immI16 src) 6927 %{ 6928 predicate(UseStoreImmI16); 6929 match(Set mem (StoreC mem src)); 6930 6931 ins_cost(150); 6932 format %{ "movw $mem, $src\t# short/char" %} 6933 opcode(0xC7); /* C7 /0 Same as 32 store immediate with prefix */ 6934 ins_encode(SizePrefix, REX_mem(mem), OpcP, RM_opc_mem(0x00, mem),Con16(src)); 6935 ins_pipe(ialu_mem_imm); 6936 %} 6937 6938 // Store Byte Immediate 6939 instruct storeImmB0(memory mem, immI0 zero) 6940 %{ 6941 predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL)); 6942 match(Set mem (StoreB mem zero)); 6943 6944 ins_cost(125); // XXX 6945 format %{ "movb $mem, R12\t# short/char (R12_heapbase==0)" %} 6946 ins_encode %{ 6947 __ 
movb($mem$$Address, r12); 6948 %} 6949 ins_pipe(ialu_mem_reg); 6950 %} 6951 6952 instruct storeImmB(memory mem, immI8 src) 6953 %{ 6954 match(Set mem (StoreB mem src)); 6955 6956 ins_cost(150); // XXX 6957 format %{ "movb $mem, $src\t# byte" %} 6958 opcode(0xC6); /* C6 /0 */ 6959 ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src)); 6960 ins_pipe(ialu_mem_imm); 6961 %} 6962 6963 // Store Aligned Packed Byte XMM register to memory 6964 instruct storeA8B(memory mem, regD src) %{ 6965 match(Set mem (Store8B mem src)); 6966 ins_cost(145); 6967 format %{ "MOVQ $mem,$src\t! packed8B" %} 6968 ins_encode( movq_st(mem, src)); 6969 ins_pipe( pipe_slow ); 6970 %} 6971 6972 // Store Aligned Packed Char/Short XMM register to memory 6973 instruct storeA4C(memory mem, regD src) %{ 6974 match(Set mem (Store4C mem src)); 6975 ins_cost(145); 6976 format %{ "MOVQ $mem,$src\t! packed4C" %} 6977 ins_encode( movq_st(mem, src)); 6978 ins_pipe( pipe_slow ); 6979 %} 6980 6981 // Store Aligned Packed Integer XMM register to memory 6982 instruct storeA2I(memory mem, regD src) %{ 6983 match(Set mem (Store2I mem src)); 6984 ins_cost(145); 6985 format %{ "MOVQ $mem,$src\t! 
packed2I" %}
  ins_encode( movq_st(mem, src));
  ins_pipe( pipe_slow );
%}

// Store CMS card-mark Immediate
// Zero case stores R12 (known zero under the predicate).
instruct storeImmCM0_reg(memory mem, immI0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreCM mem zero));

  ins_cost(125); // XXX
  format %{ "movb $mem, R12\t# CMS card-mark byte 0 (R12_heapbase==0)" %}
  ins_encode %{
    __ movb($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Card-mark store with a byte immediate (C6 /0 encoding).
instruct storeImmCM0(memory mem, immI0 src)
%{
  match(Set mem (StoreCM mem src));

  ins_cost(150); // XXX
  format %{ "movb $mem, $src\t# CMS card-mark byte 0" %}
  opcode(0xC6); /* C6 /0 */
  ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}

// Store Aligned Packed Single Float XMM register to memory
instruct storeA2F(memory mem, regD src) %{
  match(Set mem (Store2F mem src));
  ins_cost(145);
  format %{ "MOVQ $mem,$src\t! packed2F" %}
  ins_encode( movq_st(mem, src));
  ins_pipe( pipe_slow );
%}

// Store Float
// MOVSS: prefix F3 precedes REX, hence OpcP before REX_reg_mem.
instruct storeF(memory mem, regF src)
%{
  match(Set mem (StoreF mem src));

  ins_cost(95); // XXX
  format %{ "movss $mem, $src\t# float" %}
  opcode(0xF3, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
  ins_pipe(pipe_slow); // XXX
%}

// Store immediate Float value (it is faster than store from XMM register)
instruct storeF0(memory mem, immF0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreF mem zero));

  ins_cost(25); // XXX
  format %{ "movl $mem, R12\t# float 0.
(R12_heapbase==0)" %}
  ins_encode %{
    __ movl($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Store an arbitrary float constant via its 32-bit bit pattern.
instruct storeF_imm(memory mem, immF src)
%{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "movl $mem, $src\t# float" %}
  opcode(0xC7); /* C7 /0 */
  ins_encode(REX_mem(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
  ins_pipe(ialu_mem_imm);
%}

// Store Double
instruct storeD(memory mem, regD src)
%{
  match(Set mem (StoreD mem src));

  ins_cost(95); // XXX
  format %{ "movsd $mem, $src\t# double" %}
  opcode(0xF2, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, mem), OpcS, OpcT, reg_mem(src, mem));
  ins_pipe(pipe_slow); // XXX
%}

// Store immediate double 0.0 (it is faster than store from XMM register)
// Used when R12 cannot be relied on to be zero (complement of storeD0 below).
instruct storeD0_imm(memory mem, immD0 src)
%{
  predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "movq $mem, $src\t# double 0." %}
  opcode(0xC7); /* C7 /0 */
  ins_encode(REX_mem_wide(mem), OpcP, RM_opc_mem(0x00, mem), Con32F_as_bits(src));
  ins_pipe(ialu_mem_imm);
%}

// Double 0.0 via R12 (known zero under the predicate) — cheaper than the
// immediate form above.
instruct storeD0(memory mem, immD0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set mem (StoreD mem zero));

  ins_cost(25); // XXX
  format %{ "movq $mem, R12\t# double 0.
(R12_heapbase==0)" %}
  ins_encode %{
    __ movq($mem$$Address, r12);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Spill int register to a stack slot.
instruct storeSSI(stackSlotI dst, rRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "movl $dst, $src\t# int stk" %}
  opcode(0x89);
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe( ialu_mem_reg );
%}

// Spill long register to a stack slot.
instruct storeSSL(stackSlotL dst, rRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "movq $dst, $src\t# long stk" %}
  opcode(0x89);
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Spill pointer register to a stack slot.
instruct storeSSP(stackSlotP dst, rRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "movq $dst, $src\t# ptr stk" %}
  opcode(0x89);
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Spill float XMM register to a stack slot.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(95); // XXX
  format %{ "movss $dst, $src\t# float stk" %}
  opcode(0xF3, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow); // XXX
%}

// Spill double XMM register to a stack slot.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(95); // XXX
  format %{ "movsd $dst, $src\t# double stk" %}
  opcode(0xF2, 0x0F, 0x11);
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow); // XXX
%}

//----------BSWAP Instructions-------------------------------------------------
instruct bytes_reverse_int(rRegI dst) %{
  match(Set dst (ReverseBytesI dst));

  format %{ "bswapl $dst" %}
  opcode(0x0F, 0xC8); /*Opcode 0F /C8 */
  ins_encode( REX_reg(dst), OpcP, opc2_reg(dst) );
  ins_pipe( ialu_reg );
%}

instruct
bytes_reverse_long(rRegL dst) %{
  match(Set dst (ReverseBytesL dst));

  format %{ "bswapq $dst" %}

  opcode(0x0F, 0xC8); /* Opcode 0F /C8 */
  ins_encode( REX_reg_wide(dst), OpcP, opc2_reg(dst) );
  ins_pipe( ialu_reg);
%}

// Byte-swap a 16-bit value: bswap the full 32 bits, then shift the swapped
// halfword down.  Logical shift => zero-extended (unsigned) result.
instruct bytes_reverse_unsigned_short(rRegI dst) %{
  match(Set dst (ReverseBytesUS dst));

  format %{ "bswapl $dst\n\t"
            "shrl $dst,16\n\t" %}
  ins_encode %{
    __ bswapl($dst$$Register);
    __ shrl($dst$$Register, 16);
  %}
  ins_pipe( ialu_reg );
%}

// Signed variant: arithmetic shift sign-extends the swapped halfword.
instruct bytes_reverse_short(rRegI dst) %{
  match(Set dst (ReverseBytesS dst));

  format %{ "bswapl $dst\n\t"
            "sar $dst,16\n\t" %}
  ins_encode %{
    __ bswapl($dst$$Register);
    __ sarl($dst$$Register, 16);
  %}
  ins_pipe( ialu_reg );
%}

//---------- Zeros Count Instructions ------------------------------------------

instruct countLeadingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));
  effect(KILL cr);

  format %{ "lzcntl $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ lzcntl($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Fallback when LZCNT is unavailable: BSR gives the index of the highest set
// bit (undefined dst and ZF set for zero input, hence the branch); the
// -1/negate/add sequence converts bit index to leading-zero count, yielding
// 32 for a zero input.
instruct countLeadingZerosI_bsr(rRegI dst, rRegI src, rFlagsReg cr) %{
  predicate(!UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));
  effect(KILL cr);

  format %{ "bsrl $dst, $src\t# count leading zeros (int)\n\t"
            "jnz skip\n\t"
            "movl $dst, -1\n"
            "skip:\n\t"
            "negl $dst\n\t"
            "addl $dst, 31" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Label skip;
    __ bsrl(Rdst, Rsrc);
    __ jccb(Assembler::notZero, skip);
    __ movl(Rdst, -1);
    __ bind(skip);
    __ negl(Rdst);
    __ addl(Rdst, BitsPerInt - 1);
  %}
  ins_pipe(ialu_reg);
%}
instruct countLeadingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));
  effect(KILL cr);

  format %{ "lzcntq $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ lzcntq($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// BSR fallback for the 64-bit case — same index-to-count trick as the int
// variant, producing 64 for a zero input.
instruct countLeadingZerosL_bsr(rRegI dst, rRegL src, rFlagsReg cr) %{
  predicate(!UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));
  effect(KILL cr);

  format %{ "bsrq $dst, $src\t# count leading zeros (long)\n\t"
            "jnz skip\n\t"
            "movl $dst, -1\n"
            "skip:\n\t"
            "negl $dst\n\t"
            "addl $dst, 63" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rsrc = $src$$Register;
    Label skip;
    __ bsrq(Rdst, Rsrc);
    __ jccb(Assembler::notZero, skip);
    __ movl(Rdst, -1);
    __ bind(skip);
    __ negl(Rdst);
    __ addl(Rdst, BitsPerLong - 1);
  %}
  ins_pipe(ialu_reg);
%}

// BSF returns the lowest set-bit index directly; only the zero input needs
// patching (to 32) via the branch.
instruct countTrailingZerosI(rRegI dst, rRegI src, rFlagsReg cr) %{
  match(Set dst (CountTrailingZerosI src));
  effect(KILL cr);

  format %{ "bsfl $dst, $src\t# count trailing zeros (int)\n\t"
            "jnz done\n\t"
            "movl $dst, 32\n"
            "done:" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Label done;
    __ bsfl(Rdst, $src$$Register);
    __ jccb(Assembler::notZero, done);
    __ movl(Rdst, BitsPerInt);
    __ bind(done);
  %}
  ins_pipe(ialu_reg);
%}

instruct countTrailingZerosL(rRegI dst, rRegL src, rFlagsReg cr) %{
  match(Set dst (CountTrailingZerosL src));
  effect(KILL cr);

  format %{ "bsfq $dst, $src\t# count trailing zeros (long)\n\t"
            "jnz done\n\t"
            "movl $dst, 64\n"
            "done:" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Label done;
    __ bsfq(Rdst, $src$$Register);
    __
jccb(Assembler::notZero, done);
    __ movl(Rdst, BitsPerLong);
    __ bind(done);
  %}
  ins_pipe(ialu_reg);
%}


//---------- Population Count Instructions -------------------------------------

instruct popCountI(rRegI dst, rRegI src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI src));

  format %{ "popcnt $dst, $src" %}
  ins_encode %{
    __ popcntl($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Memory-operand form: folds the load into the POPCNT.
instruct popCountI_mem(rRegI dst, memory mem) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountI (LoadI mem)));

  format %{ "popcnt $dst, $mem" %}
  ins_encode %{
    __ popcntl($dst$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_reg);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL(rRegI dst, rRegL src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));

  format %{ "popcnt $dst, $src" %}
  ins_encode %{
    __ popcntq($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Note: Long.bitCount(long) returns an int.
instruct popCountL_mem(rRegI dst, memory mem) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL (LoadL mem)));

  format %{ "popcnt $dst, $mem" %}
  ins_encode %{
    __ popcntq($dst$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_reg);
%}


//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors

// Acquire barrier: x86's memory model makes this a no-op in the code stream;
// the node exists only to constrain the scheduler.
instruct membar_acquire()
%{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire !
(empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}

// Acquire following a FastLock's CMPXCHG — the locked instruction already
// provides the ordering, so no code is emitted.
instruct membar_acquire_lock()
%{
  match(MemBarAcquire);
  predicate(Matcher::prior_fast_lock(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock so empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}

// Release barrier: also free on x86 (no store-store reordering).
instruct membar_release()
%{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release ! (empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}

instruct membar_release_lock()
%{
  match(MemBarRelease);
  predicate(Matcher::post_fast_unlock(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (a FastUnlock follows so empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}

// Full StoreLoad fence.  The $$template format prints what membar() emits:
// a locked add to the stack on MP systems, nothing on UP.
instruct membar_volatile(rFlagsReg cr) %{
  match(MemBarVolatile);
  effect(KILL cr);
  ins_cost(400);

  format %{
    $$template
    if (os::is_MP()) {
      $$emit$$"lock addl [rsp + #0], 0\t! membar_volatile"
    } else {
      $$emit$$"MEMBAR-volatile !
(empty encoding)"
    }
  %}
  ins_encode %{
    __ membar(Assembler::StoreLoad);
  %}
  ins_pipe(pipe_slow);
%}

// Elided when a prior instruction already acts as a StoreLoad barrier.
instruct unnecessary_membar_volatile()
%{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding)" %}
  ins_encode();
  ins_pipe(empty);
%}

//----------Move Instructions--------------------------------------------------

// Reinterpret a long as a pointer (register copy only).
instruct castX2P(rRegP dst, rRegL src)
%{
  match(Set dst (CastX2P src));

  format %{ "movq $dst, $src\t# long->ptr" %}
  ins_encode(enc_copy_wide(dst, src));
  ins_pipe(ialu_reg_reg); // XXX
%}

// Reinterpret a pointer as a long (register copy only).
instruct castP2X(rRegL dst, rRegP src)
%{
  match(Set dst (CastP2X src));

  format %{ "movq $dst, $src\t# ptr -> long" %}
  ins_encode(enc_copy_wide(dst, src));
  ins_pipe(ialu_reg_reg); // XXX
%}


// Convert oop pointer into compressed form
// General case: the oop may be NULL, so encode_heap_oop (which handles NULL)
// is used; copy first if src and dst differ.
instruct encodeHeapOop(rRegN dst, rRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ movq(d, s);
    }
    __ encode_heap_oop(d);
  %}
  ins_pipe(ialu_reg_long);
%}

// Known non-NULL oop: the NULL check inside the encoding can be skipped.
instruct encodeHeapOop_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  format %{ "encode_heap_oop_not_null $dst,$src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg_long);
%}

instruct decodeHeapOop(rRegP dst, rRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  effect(KILL cr);
  format %{ "decode_heap_oop $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ movq(d, s);
    }
    __ decode_heap_oop(d);
  %}
  ins_pipe(ialu_reg_long);
%}

// Known non-NULL (or constant) compressed oop: use the cheaper decode that
// skips the NULL check; a two-register form exists when src != dst.
instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  effect(KILL cr);
  format %{ "decode_heap_oop_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe(ialu_reg_long);
%}


//----------Conditional Move---------------------------------------------------
// Jump
// dummy instruction for generating temp registers
// Tableswitch dispatch: index is pre-shifted by the match rule's LShiftL.
instruct jumpXtnd_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
  match(Jump (LShiftL switch_val shift));
  ins_cost(350);
  predicate(false);
  effect(TEMP dest);

  format %{ "leaq $dest, [$constantaddress]\n\t"
            "jmp [$dest + $switch_val << $shift]\n\t" %}
  ins_encode %{
    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
    // ArrayAddress dispatch(table, index);
    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant);
    __ lea($dest$$Register, $constantaddress);
    __ jmp(dispatch);
  %}
  ins_pipe(pipe_jmp);
%}

// Tableswitch dispatch with an extra constant displacement folded in.
instruct jumpXtnd_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
  match(Jump (AddL (LShiftL switch_val shift) offset));
  ins_cost(350);
  effect(TEMP dest);

  format %{ "leaq $dest, [$constantaddress]\n\t"
            "jmp [$dest + $switch_val << $shift + $offset]\n\t" %}
  ins_encode %{
    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
    // ArrayAddress dispatch(table, index);
    Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
    __ lea($dest$$Register, $constantaddress);
    __ jmp(dispatch);
  %}
  ins_pipe(pipe_jmp);
%}

// Unscaled tableswitch dispatch (index already in bytes).
instruct jumpXtnd(rRegL switch_val, rRegI dest) %{
  match(Jump switch_val);
  ins_cost(350);
  effect(TEMP dest);

  format %{ "leaq $dest, [$constantaddress]\n\t"
            "jmp [$dest + $switch_val]\n\t" %}
  ins_encode %{
    // We could use jump(ArrayAddress) except that the macro assembler needs to use r10
    // to do that and the compiler is using that register as one it can allocate.
    // So we build it all by hand.
    // Address index(noreg, switch_reg, Address::times_1);
    // ArrayAddress dispatch(table, index);
    Address dispatch($dest$$Register, $switch_val$$Register, Address::times_1);
    __ lea($dest$$Register, $constantaddress);
    __ jmp(dispatch);
  %}
  ins_pipe(pipe_jmp);
%}

// Conditional move
// CMOVcc encoding 0F 40+cc; enc_cmov merges the condition into the opcode.
instruct cmovI_reg(rRegI dst, rRegI src, rFlagsReg cr, cmpOp cop)
%{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovl$cop $dst, $src\t# signed, int" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

instruct cmovI_regU(cmpOpU cop, rFlagsRegU cr, rRegI dst, rRegI src) %{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

// UCF (unordered-compare-flags) variant expands to the unsigned form.
instruct cmovI_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, rRegI src) %{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovI_regU(cop, cr, dst, src);
  %}
%}

// Conditional move
instruct cmovI_mem(cmpOp cop, rFlagsReg cr, rRegI dst, memory src) %{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));

  ins_cost(250); // XXX
  format %{ "cmovl$cop $dst, $src\t# signed, int" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
  ins_pipe(pipe_cmov_mem);
%}

// Conditional move
instruct cmovI_memU(cmpOpU cop, rFlagsRegU cr, rRegI dst, memory src)
%{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));

  ins_cost(250); // XXX
  format %{ "cmovl$cop $dst, $src\t# unsigned, int" %}
  opcode(0x0F,
0x40);
  ins_encode(REX_reg_mem(dst, src), enc_cmov(cop), reg_mem(dst, src));
  ins_pipe(pipe_cmov_mem);
%}

instruct cmovI_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegI dst, memory src) %{
  match(Set dst (CMoveI (Binary cop cr) (Binary dst (LoadI src))));
  ins_cost(250);
  expand %{
    cmovI_memU(cop, cr, dst, src);
  %}
%}

// Conditional move
// Compressed pointers are 32 bits, so the 32-bit cmovl form is used.
instruct cmovN_reg(rRegN dst, rRegN src, rFlagsReg cr, cmpOp cop)
%{
  match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovl$cop $dst, $src\t# signed, compressed ptr" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

// Conditional move
instruct cmovN_regU(cmpOpU cop, rFlagsRegU cr, rRegN dst, rRegN src)
%{
  match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovl$cop $dst, $src\t# unsigned, compressed ptr" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}

instruct cmovN_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegN dst, rRegN src) %{
  match(Set dst (CMoveN (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovN_regU(cop, cr, dst, src);
  %}
%}

// Conditional move
// Full 64-bit pointer cmov (REX.W form).
instruct cmovP_reg(rRegP dst, rRegP src, rFlagsReg cr, cmpOp cop)
%{
  match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# signed, ptr" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg); // XXX
%}

// Conditional move
instruct cmovP_regU(cmpOpU cop, rFlagsRegU cr, rRegP dst, rRegP src)
%{
  match(Set dst (CMoveP (Binary cop cr) (Binary
dst src)));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# unsigned, ptr" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg); // XXX
%}

instruct cmovP_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegP dst, rRegP src) %{
  match(Set dst (CMoveP (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovP_regU(cop, cr, dst, src);
  %}
%}

// DISABLED: Requires the ADLC to emit a bottom_type call that
// correctly meets the two pointer arguments; one is an incoming
// register but the other is a memory operand. ALSO appears to
// be buggy with implicit null checks.
//
//// Conditional move
//instruct cmovP_mem(cmpOp cop, rFlagsReg cr, rRegP dst, memory src)
//%{
//  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
//  ins_cost(250);
//  format %{ "CMOV$cop $dst,$src\t# ptr" %}
//  opcode(0x0F,0x40);
//  ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
//  ins_pipe( pipe_cmov_mem );
//%}
//
//// Conditional move
//instruct cmovP_memU(cmpOpU cop, rFlagsRegU cr, rRegP dst, memory src)
//%{
//  match(Set dst (CMoveP (Binary cop cr) (Binary dst (LoadP src))));
//  ins_cost(250);
//  format %{ "CMOV$cop $dst,$src\t# ptr" %}
//  opcode(0x0F,0x40);
//  ins_encode( enc_cmov(cop), reg_mem( dst, src ) );
//  ins_pipe( pipe_cmov_mem );
//%}

// 64-bit conditional move, signed comparison flags.
instruct cmovL_reg(cmpOp cop, rFlagsReg cr, rRegL dst, rRegL src)
%{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# signed, long" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg); // XXX
%}

// Memory-operand form: folds the load into the cmov.
instruct cmovL_mem(cmpOp cop, rFlagsReg cr, rRegL dst, memory src)
%{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# signed, long" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
  ins_pipe(pipe_cmov_mem); // XXX
%}

instruct cmovL_regU(cmpOpU cop, rFlagsRegU cr, rRegL dst, rRegL src)
%{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_reg_wide(dst, src), enc_cmov(cop), reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg); // XXX
%}

instruct cmovL_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, rRegL src) %{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovL_regU(cop, cr, dst, src);
  %}
%}

instruct cmovL_memU(cmpOpU cop, rFlagsRegU cr, rRegL dst, memory src)
%{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));

  ins_cost(200); // XXX
  format %{ "cmovq$cop $dst, $src\t# unsigned, long" %}
  opcode(0x0F, 0x40);
  ins_encode(REX_reg_mem_wide(dst, src), enc_cmov(cop), reg_mem(dst, src));
  ins_pipe(pipe_cmov_mem); // XXX
%}

instruct cmovL_memUCF(cmpOpUCF cop, rFlagsRegUCF cr, rRegL dst, memory src) %{
  match(Set dst (CMoveL (Binary cop cr) (Binary dst (LoadL src))));
  ins_cost(200);
  expand %{
    cmovL_memU(cop, cr, dst, src);
  %}
%}

// Float conditional move: no XMM cmov instruction exists, so this is emitted
// as a short conditional branch around a MOVSS (see enc_cmovf_branch).
instruct cmovF_reg(cmpOp cop, rFlagsReg cr, regF dst, regF src)
%{
  match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "jn$cop skip\t# signed cmove float\n\t"
            "movss $dst, $src\n"
            "skip:" %}
  ins_encode(enc_cmovf_branch(cop, dst, src));
  ins_pipe(pipe_slow);
%}

// instruct cmovF_mem(cmpOp cop,
rFlagsReg cr, regF dst, memory src)
// %{
//   match(Set dst (CMoveF (Binary cop cr) (Binary dst (LoadL src))));

//   ins_cost(200); // XXX
//   format %{ "jn$cop skip\t# signed cmove float\n\t"
//             "movss $dst, $src\n"
//             "skip:" %}
//   ins_encode(enc_cmovf_mem_branch(cop, dst, src));
//   ins_pipe(pipe_slow);
// %}

// Branch-around cmove for float, unsigned comparison flags.
instruct cmovF_regU(cmpOpU cop, rFlagsRegU cr, regF dst, regF src)
%{
  match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "jn$cop skip\t# unsigned cmove float\n\t"
            "movss $dst, $src\n"
            "skip:" %}
  ins_encode(enc_cmovf_branch(cop, dst, src));
  ins_pipe(pipe_slow);
%}

instruct cmovF_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovF_regU(cop, cr, dst, src);
  %}
%}

// Branch-around cmove for double (MOVSD under a conditional jump).
instruct cmovD_reg(cmpOp cop, rFlagsReg cr, regD dst, regD src)
%{
  match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "jn$cop skip\t# signed cmove double\n\t"
            "movsd $dst, $src\n"
            "skip:" %}
  ins_encode(enc_cmovd_branch(cop, dst, src));
  ins_pipe(pipe_slow);
%}

instruct cmovD_regU(cmpOpU cop, rFlagsRegU cr, regD dst, regD src)
%{
  match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));

  ins_cost(200); // XXX
  format %{ "jn$cop skip\t# unsigned cmove double\n\t"
            "movsd $dst, $src\n"
            "skip:" %}
  ins_encode(enc_cmovd_branch(cop, dst, src));
  ins_pipe(pipe_slow);
%}

instruct cmovD_regUCF(cmpOpUCF cop, rFlagsRegUCF cr, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cop cr) (Binary dst src)));
  ins_cost(200);
  expand %{
    cmovD_regU(cop, cr, dst, src);
  %}
%}

//----------Arithmetic
Instructions-------------------------------------------- 7880 //----------Addition Instructions---------------------------------------------- 7881 7882 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr) 7883 %{ 7884 match(Set dst (AddI dst src)); 7885 effect(KILL cr); 7886 7887 format %{ "addl $dst, $src\t# int" %} 7888 opcode(0x03); 7889 ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src)); 7890 ins_pipe(ialu_reg_reg); 7891 %} 7892 7893 instruct addI_rReg_imm(rRegI dst, immI src, rFlagsReg cr) 7894 %{ 7895 match(Set dst (AddI dst src)); 7896 effect(KILL cr); 7897 7898 format %{ "addl $dst, $src\t# int" %} 7899 opcode(0x81, 0x00); /* /0 id */ 7900 ins_encode(OpcSErm(dst, src), Con8or32(src)); 7901 ins_pipe( ialu_reg ); 7902 %} 7903 7904 instruct addI_rReg_mem(rRegI dst, memory src, rFlagsReg cr) 7905 %{ 7906 match(Set dst (AddI dst (LoadI src))); 7907 effect(KILL cr); 7908 7909 ins_cost(125); // XXX 7910 format %{ "addl $dst, $src\t# int" %} 7911 opcode(0x03); 7912 ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src)); 7913 ins_pipe(ialu_reg_mem); 7914 %} 7915 7916 instruct addI_mem_rReg(memory dst, rRegI src, rFlagsReg cr) 7917 %{ 7918 match(Set dst (StoreI dst (AddI (LoadI dst) src))); 7919 effect(KILL cr); 7920 7921 ins_cost(150); // XXX 7922 format %{ "addl $dst, $src\t# int" %} 7923 opcode(0x01); /* Opcode 01 /r */ 7924 ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst)); 7925 ins_pipe(ialu_mem_reg); 7926 %} 7927 7928 instruct addI_mem_imm(memory dst, immI src, rFlagsReg cr) 7929 %{ 7930 match(Set dst (StoreI dst (AddI (LoadI dst) src))); 7931 effect(KILL cr); 7932 7933 ins_cost(125); // XXX 7934 format %{ "addl $dst, $src\t# int" %} 7935 opcode(0x81); /* Opcode 81 /0 id */ 7936 ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src)); 7937 ins_pipe(ialu_mem_imm); 7938 %} 7939 7940 instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr) 7941 %{ 7942 predicate(UseIncDec); 7943 match(Set dst (AddI dst src)); 7944 
effect(KILL cr); 7945 7946 format %{ "incl $dst\t# int" %} 7947 opcode(0xFF, 0x00); // FF /0 7948 ins_encode(REX_reg(dst), OpcP, reg_opc(dst)); 7949 ins_pipe(ialu_reg); 7950 %} 7951 7952 instruct incI_mem(memory dst, immI1 src, rFlagsReg cr) 7953 %{ 7954 predicate(UseIncDec); 7955 match(Set dst (StoreI dst (AddI (LoadI dst) src))); 7956 effect(KILL cr); 7957 7958 ins_cost(125); // XXX 7959 format %{ "incl $dst\t# int" %} 7960 opcode(0xFF); /* Opcode FF /0 */ 7961 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x00, dst)); 7962 ins_pipe(ialu_mem_imm); 7963 %} 7964 7965 // XXX why does that use AddI 7966 instruct decI_rReg(rRegI dst, immI_M1 src, rFlagsReg cr) 7967 %{ 7968 predicate(UseIncDec); 7969 match(Set dst (AddI dst src)); 7970 effect(KILL cr); 7971 7972 format %{ "decl $dst\t# int" %} 7973 opcode(0xFF, 0x01); // FF /1 7974 ins_encode(REX_reg(dst), OpcP, reg_opc(dst)); 7975 ins_pipe(ialu_reg); 7976 %} 7977 7978 // XXX why does that use AddI 7979 instruct decI_mem(memory dst, immI_M1 src, rFlagsReg cr) 7980 %{ 7981 predicate(UseIncDec); 7982 match(Set dst (StoreI dst (AddI (LoadI dst) src))); 7983 effect(KILL cr); 7984 7985 ins_cost(125); // XXX 7986 format %{ "decl $dst\t# int" %} 7987 opcode(0xFF); /* Opcode FF /1 */ 7988 ins_encode(REX_mem(dst), OpcP, RM_opc_mem(0x01, dst)); 7989 ins_pipe(ialu_mem_imm); 7990 %} 7991 7992 instruct leaI_rReg_immI(rRegI dst, rRegI src0, immI src1) 7993 %{ 7994 match(Set dst (AddI src0 src1)); 7995 7996 ins_cost(110); 7997 format %{ "addr32 leal $dst, [$src0 + $src1]\t# int" %} 7998 opcode(0x8D); /* 0x8D /r */ 7999 ins_encode(Opcode(0x67), REX_reg_reg(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX 8000 ins_pipe(ialu_reg_reg); 8001 %} 8002 8003 instruct addL_rReg(rRegL dst, rRegL src, rFlagsReg cr) 8004 %{ 8005 match(Set dst (AddL dst src)); 8006 effect(KILL cr); 8007 8008 format %{ "addq $dst, $src\t# long" %} 8009 opcode(0x03); 8010 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src)); 8011 ins_pipe(ialu_reg_reg); 
8012 %} 8013 8014 instruct addL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr) 8015 %{ 8016 match(Set dst (AddL dst src)); 8017 effect(KILL cr); 8018 8019 format %{ "addq $dst, $src\t# long" %} 8020 opcode(0x81, 0x00); /* /0 id */ 8021 ins_encode(OpcSErm_wide(dst, src), Con8or32(src)); 8022 ins_pipe( ialu_reg ); 8023 %} 8024 8025 instruct addL_rReg_mem(rRegL dst, memory src, rFlagsReg cr) 8026 %{ 8027 match(Set dst (AddL dst (LoadL src))); 8028 effect(KILL cr); 8029 8030 ins_cost(125); // XXX 8031 format %{ "addq $dst, $src\t# long" %} 8032 opcode(0x03); 8033 ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src)); 8034 ins_pipe(ialu_reg_mem); 8035 %} 8036 8037 instruct addL_mem_rReg(memory dst, rRegL src, rFlagsReg cr) 8038 %{ 8039 match(Set dst (StoreL dst (AddL (LoadL dst) src))); 8040 effect(KILL cr); 8041 8042 ins_cost(150); // XXX 8043 format %{ "addq $dst, $src\t# long" %} 8044 opcode(0x01); /* Opcode 01 /r */ 8045 ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst)); 8046 ins_pipe(ialu_mem_reg); 8047 %} 8048 8049 instruct addL_mem_imm(memory dst, immL32 src, rFlagsReg cr) 8050 %{ 8051 match(Set dst (StoreL dst (AddL (LoadL dst) src))); 8052 effect(KILL cr); 8053 8054 ins_cost(125); // XXX 8055 format %{ "addq $dst, $src\t# long" %} 8056 opcode(0x81); /* Opcode 81 /0 id */ 8057 ins_encode(REX_mem_wide(dst), 8058 OpcSE(src), RM_opc_mem(0x00, dst), Con8or32(src)); 8059 ins_pipe(ialu_mem_imm); 8060 %} 8061 8062 instruct incL_rReg(rRegI dst, immL1 src, rFlagsReg cr) 8063 %{ 8064 predicate(UseIncDec); 8065 match(Set dst (AddL dst src)); 8066 effect(KILL cr); 8067 8068 format %{ "incq $dst\t# long" %} 8069 opcode(0xFF, 0x00); // FF /0 8070 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst)); 8071 ins_pipe(ialu_reg); 8072 %} 8073 8074 instruct incL_mem(memory dst, immL1 src, rFlagsReg cr) 8075 %{ 8076 predicate(UseIncDec); 8077 match(Set dst (StoreL dst (AddL (LoadL dst) src))); 8078 effect(KILL cr); 8079 8080 ins_cost(125); // XXX 8081 format %{ 
"incq $dst\t# long" %} 8082 opcode(0xFF); /* Opcode FF /0 */ 8083 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x00, dst)); 8084 ins_pipe(ialu_mem_imm); 8085 %} 8086 8087 // XXX why does that use AddL 8088 instruct decL_rReg(rRegL dst, immL_M1 src, rFlagsReg cr) 8089 %{ 8090 predicate(UseIncDec); 8091 match(Set dst (AddL dst src)); 8092 effect(KILL cr); 8093 8094 format %{ "decq $dst\t# long" %} 8095 opcode(0xFF, 0x01); // FF /1 8096 ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst)); 8097 ins_pipe(ialu_reg); 8098 %} 8099 8100 // XXX why does that use AddL 8101 instruct decL_mem(memory dst, immL_M1 src, rFlagsReg cr) 8102 %{ 8103 predicate(UseIncDec); 8104 match(Set dst (StoreL dst (AddL (LoadL dst) src))); 8105 effect(KILL cr); 8106 8107 ins_cost(125); // XXX 8108 format %{ "decq $dst\t# long" %} 8109 opcode(0xFF); /* Opcode FF /1 */ 8110 ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(0x01, dst)); 8111 ins_pipe(ialu_mem_imm); 8112 %} 8113 8114 instruct leaL_rReg_immL(rRegL dst, rRegL src0, immL32 src1) 8115 %{ 8116 match(Set dst (AddL src0 src1)); 8117 8118 ins_cost(110); 8119 format %{ "leaq $dst, [$src0 + $src1]\t# long" %} 8120 opcode(0x8D); /* 0x8D /r */ 8121 ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1)); // XXX 8122 ins_pipe(ialu_reg_reg); 8123 %} 8124 8125 instruct addP_rReg(rRegP dst, rRegL src, rFlagsReg cr) 8126 %{ 8127 match(Set dst (AddP dst src)); 8128 effect(KILL cr); 8129 8130 format %{ "addq $dst, $src\t# ptr" %} 8131 opcode(0x03); 8132 ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src)); 8133 ins_pipe(ialu_reg_reg); 8134 %} 8135 8136 instruct addP_rReg_imm(rRegP dst, immL32 src, rFlagsReg cr) 8137 %{ 8138 match(Set dst (AddP dst src)); 8139 effect(KILL cr); 8140 8141 format %{ "addq $dst, $src\t# ptr" %} 8142 opcode(0x81, 0x00); /* /0 id */ 8143 ins_encode(OpcSErm_wide(dst, src), Con8or32(src)); 8144 ins_pipe( ialu_reg ); 8145 %} 8146 8147 // XXX addP mem ops ???? 
// Pointer addition via LEA: dst = src0 + src1 (32-bit signed immediate).
// Unlike addP_rReg above, no KILL cr effect — LEA computes an address
// without an ALU flags result, so flags are preserved.
instruct leaP_rReg_imm(rRegP dst, rRegP src0, immL32 src1)
%{
  match(Set dst (AddP src0 src1));

  ins_cost(110);
  format %{ "leaq $dst, [$src0 + $src1]\t# ptr" %}
  opcode(0x8D); /* 0x8D /r */
  ins_encode(REX_reg_reg_wide(dst, src0), OpcP, reg_lea(dst, src0, src1));// XXX
  ins_pipe(ialu_reg_reg);
%}

// CheckCastPP is a no-op at the machine level: the ideal node only narrows
// the compiler's type information.  size(0) — no code is emitted.
instruct checkCastPP(rRegP dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}

// CastPP likewise emits no code; it exists to carry type/nullness info.
instruct castPP(rRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}

// CastII emits no code; ins_cost(0) so the allocator never prefers a copy.
instruct castII(rRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(empty);
%}

// LoadP-locked same as a regular LoadP when used with compare-swap
instruct loadPLocked(rRegP dst, memory mem)
%{
  match(Set dst (LoadPLocked mem));

  ins_cost(125); // XXX
  format %{ "movq $dst, $mem\t# ptr locked" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_mem); // XXX
%}

// LoadL-locked - same as a regular LoadL when used with compare-swap
instruct loadLLocked(rRegL dst, memory mem)
%{
  match(Set dst (LoadLLocked mem));

  ins_cost(125); // XXX
  format %{ "movq $dst, $mem\t# long locked" %}
  opcode(0x8B);
  ins_encode(REX_reg_mem_wide(dst, mem), OpcP, reg_mem(dst, mem));
  ins_pipe(ialu_reg_mem); // XXX
%}

// Conditional-store of the updated heap-top.
// Used during allocation of the shared heap.
// Sets flags (EQ) on success. Implemented with a CMPXCHG on Intel.
instruct storePConditional(memory heap_top_ptr,
                           rax_RegP oldval, rRegP newval,
                           rFlagsReg cr)
%{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  // NOTE(review): unlike storeIConditional below, there is no
  // effect(KILL oldval) here even though CMPXCHG writes the memory value
  // into rax on failure — confirm this omission is intentional (e.g. the
  // failure path always reloads the heap top).
  format %{ "cmpxchgq $heap_top_ptr, $newval\t# (ptr) "
            "If rax == $heap_top_ptr then store $newval into $heap_top_ptr" %}
  opcode(0x0F, 0xB1);
  ins_encode(lock_prefix,
             REX_reg_mem_wide(newval, heap_top_ptr),
             OpcP, OpcS,
             reg_mem(newval, heap_top_ptr));
  ins_pipe(pipe_cmpxchg);
%}

// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
instruct storeIConditional(memory mem, rax_RegI oldval, rRegI newval, rFlagsReg cr)
%{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  effect(KILL oldval); // CMPXCHG clobbers rax (loads the memory value on failure)

  format %{ "cmpxchgl $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
  opcode(0x0F, 0xB1);
  ins_encode(lock_prefix,
             REX_reg_mem(newval, mem),
             OpcP, OpcS,
             reg_mem(newval, mem));
  ins_pipe(pipe_cmpxchg);
%}

// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
instruct storeLConditional(memory mem, rax_RegL oldval, rRegL newval, rFlagsReg cr)
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval); // CMPXCHG clobbers rax (loads the memory value on failure)

  format %{ "cmpxchgq $mem, $newval\t# If rax == $mem then store $newval into $mem" %}
  opcode(0x0F, 0xB1);
  ins_encode(lock_prefix,
             REX_reg_mem_wide(newval, mem),
             OpcP, OpcS,
             reg_mem(newval, mem));
  ins_pipe(pipe_cmpxchg);
%}


// XXX No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
// Each CAS rule materializes the boolean result in an int register:
// LOCK CMPXCHG; SETE res; MOVZBL res, res.
instruct compareAndSwapP(rRegI res,
                         memory mem_ptr,
                         rax_RegP oldval, rRegP newval,
                         rFlagsReg cr)
%{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL cr, KILL oldval);

  format %{ "cmpxchgq $mem_ptr,$newval\t# "
            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
            "sete $res\n\t"
            "movzbl $res, $res" %}
  opcode(0x0F, 0xB1);
  ins_encode(lock_prefix,
             REX_reg_mem_wide(newval, mem_ptr),
             OpcP, OpcS,
             reg_mem(newval, mem_ptr),
             REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
             REX_reg_breg(res, res), // movzbl
             Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
  ins_pipe( pipe_cmpxchg );
%}

instruct compareAndSwapL(rRegI res,
                         memory mem_ptr,
                         rax_RegL oldval, rRegL newval,
                         rFlagsReg cr)
%{
  match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
  effect(KILL cr, KILL oldval);

  format %{ "cmpxchgq $mem_ptr,$newval\t# "
            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
            "sete $res\n\t"
            "movzbl $res, $res" %}
  opcode(0x0F, 0xB1);
  ins_encode(lock_prefix,
             REX_reg_mem_wide(newval, mem_ptr),
             OpcP, OpcS,
             reg_mem(newval, mem_ptr),
             REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
             REX_reg_breg(res, res), // movzbl
             Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
  ins_pipe( pipe_cmpxchg );
%}

instruct compareAndSwapI(rRegI res,
                         memory mem_ptr,
                         rax_RegI oldval, rRegI newval,
                         rFlagsReg cr)
%{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL cr, KILL oldval);

  format %{ "cmpxchgl $mem_ptr,$newval\t# "
            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
            "sete $res\n\t"
            "movzbl $res, $res" %}
  opcode(0x0F, 0xB1);
  ins_encode(lock_prefix,
             REX_reg_mem(newval, mem_ptr),
             OpcP, OpcS,
             reg_mem(newval, mem_ptr),
             REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
             REX_reg_breg(res, res), // movzbl
             Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
  ins_pipe( pipe_cmpxchg );
%}


// CAS of a compressed (narrow) oop — 32-bit operand, so no REX.W.
instruct compareAndSwapN(rRegI res,
                         memory mem_ptr,
                         rax_RegN oldval, rRegN newval,
                         rFlagsReg cr) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL cr, KILL oldval);

  format %{ "cmpxchgl $mem_ptr,$newval\t# "
            "If rax == $mem_ptr then store $newval into $mem_ptr\n\t"
            "sete $res\n\t"
            "movzbl $res, $res" %}
  opcode(0x0F, 0xB1);
  ins_encode(lock_prefix,
             REX_reg_mem(newval, mem_ptr),
             OpcP, OpcS,
             reg_mem(newval, mem_ptr),
             REX_breg(res), Opcode(0x0F), Opcode(0x94), reg(res), // sete
             REX_reg_breg(res, res), // movzbl
             Opcode(0xF), Opcode(0xB6), reg_reg(res, res));
  ins_pipe( pipe_cmpxchg );
%}

//----------Subtraction Instructions-------------------------------------------

// Integer Subtraction Instructions
instruct subI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (SubI dst src));
  effect(KILL cr);

  format %{ "subl $dst, $src\t# int" %}
  opcode(0x2B);
  ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

instruct subI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
  match(Set dst (SubI dst src));
  effect(KILL cr);

  format %{ "subl $dst, $src\t# int" %}
  opcode(0x81, 0x05); /* Opcode 81 /5 */
  ins_encode(OpcSErm(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

instruct subI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
%{
  match(Set dst (SubI dst (LoadI src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "subl $dst, $src\t# int" %}
  opcode(0x2B);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

instruct subI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (SubI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "subl $dst, $src\t# int" %}
  opcode(0x29); /* Opcode 29 /r */
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

instruct subI_mem_imm(memory dst, immI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (SubI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(125); // XXX
  format %{ "subl $dst, $src\t# int" %}
  opcode(0x81); /* Opcode 81 /5 id */
  ins_encode(REX_mem(dst), OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}

instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (SubL dst src));
  effect(KILL cr);

  format %{ "subq $dst, $src\t# long" %}
  opcode(0x2B);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Fixed operand type: dst was declared rRegI, but this rule matches SubL and
// is encoded with the wide (REX.W) form — use rRegL for consistency with
// addL_rReg_imm and the other subL_* rules.
instruct subL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (SubL dst src));
  effect(KILL cr);

  format %{ "subq $dst, $src\t# long" %}
  opcode(0x81, 0x05); /* Opcode 81 /5 */
  ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

instruct subL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
%{
  match(Set dst (SubL dst (LoadL src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "subq $dst, $src\t# long" %}
  opcode(0x2B);
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

instruct subL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (SubL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "subq $dst, $src\t# long" %}
  opcode(0x29); /* Opcode 29 /r */
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

instruct subL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (SubL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(125); // XXX
  format %{ "subq $dst, $src\t# long" %}
  opcode(0x81); /* Opcode 81 /5 id */
  ins_encode(REX_mem_wide(dst),
             OpcSE(src), RM_opc_mem(0x05, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}

// Subtract from a pointer
// XXX hmpf???
// Subtract an int from a pointer, matched as dst = dst + (0 - src).
// NOTE(review): src is an int register consumed by a REX.W subq — assumes the
// upper 32 bits of src are in a usable state here; see the "XXX hmpf???"
// remark above — confirm.
instruct subP_rReg(rRegP dst, rRegI src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (AddP dst (SubI zero src)));
  effect(KILL cr);

  format %{ "subq $dst, $src\t# ptr - int" %}
  opcode(0x2B);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Two's-complement negation in place: dst = 0 - dst (NEG, F7 /3).
instruct negI_rReg(rRegI dst, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (SubI zero dst));
  effect(KILL cr);

  format %{ "negl $dst\t# int" %}
  opcode(0xF7, 0x03); // Opcode F7 /3
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Memory form of int negation; 'secondary' supplies the /3 reg-field digit.
instruct negI_mem(memory dst, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (SubI zero (LoadI dst))));
  effect(KILL cr);

  format %{ "negl $dst\t# int" %}
  opcode(0xF7, 0x03); // Opcode F7 /3
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_reg);
%}

instruct negL_rReg(rRegL dst, immL0 zero, rFlagsReg cr)
%{
  match(Set dst (SubL zero dst));
  effect(KILL cr);

  format %{ "negq $dst\t# long" %}
  opcode(0xF7, 0x03); // Opcode F7 /3
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

instruct negL_mem(memory dst, immL0 zero, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (SubL zero (LoadL dst))));
  effect(KILL cr);

  format %{ "negq $dst\t# long" %}
  opcode(0xF7, 0x03); // Opcode F7 /3
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_reg);
%}


//----------Multiplication/Division Instructions-------------------------------
// Integer Multiplication Instructions
// Multiply Register

// Two-operand imul: dst *= src (0F AF /r); routed to the ALU0 pipe.
instruct mulI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (MulI dst src));
  effect(KILL cr);

  ins_cost(300);
  format %{ "imull $dst, $src\t# int" %}
  opcode(0x0F, 0xAF);
  ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg_alu0);
%}

// Three-operand imul: dst = src * imm (69 /r id); dst need not equal src.
instruct mulI_rReg_imm(rRegI dst, rRegI src, immI imm, rFlagsReg cr)
%{
  match(Set dst (MulI src imm));
  effect(KILL cr);

  ins_cost(300);
  format %{ "imull $dst, $src, $imm\t# int" %}
  opcode(0x69); /* 69 /r id */
  ins_encode(REX_reg_reg(dst, src),
             OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
  ins_pipe(ialu_reg_reg_alu0);
%}

instruct mulI_mem(rRegI dst, memory src, rFlagsReg cr)
%{
  match(Set dst (MulI dst (LoadI src)));
  effect(KILL cr);

  ins_cost(350);
  format %{ "imull $dst, $src\t# int" %}
  opcode(0x0F, 0xAF);
  ins_encode(REX_reg_mem(dst, src), OpcP, OpcS, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem_alu0);
%}

instruct mulI_mem_imm(rRegI dst, memory src, immI imm, rFlagsReg cr)
%{
  match(Set dst (MulI (LoadI src) imm));
  effect(KILL cr);

  ins_cost(300);
  format %{ "imull $dst, $src, $imm\t# int" %}
  opcode(0x69); /* 69 /r id */
  ins_encode(REX_reg_mem(dst, src),
             OpcSE(imm), reg_mem(dst, src), Con8or32(imm));
  ins_pipe(ialu_reg_mem_alu0);
%}

instruct mulL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (MulL dst src));
  effect(KILL cr);

  ins_cost(300);
  format %{ "imulq $dst, $src\t# long" %}
  opcode(0x0F, 0xAF);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg_alu0);
%}

instruct mulL_rReg_imm(rRegL dst, rRegL src, immL32 imm, rFlagsReg cr)
%{
  match(Set dst (MulL src imm));
  effect(KILL cr);

  ins_cost(300);
  format %{ "imulq $dst, $src, $imm\t# long" %}
  opcode(0x69); /* 69 /r id */
  ins_encode(REX_reg_reg_wide(dst, src),
             OpcSE(imm), reg_reg(dst, src), Con8or32(imm));
  ins_pipe(ialu_reg_reg_alu0);
%}

instruct mulL_mem(rRegL dst, memory src, rFlagsReg cr) 8619 %{ 8620 match(Set dst (MulL dst (LoadL src))); 8621 effect(KILL cr); 8622 8623 ins_cost(350); 8624 format %{ "imulq $dst, $src\t# long" %} 8625 opcode(0x0F, 0xAF); 8626 ins_encode(REX_reg_mem_wide(dst, src), OpcP, OpcS, reg_mem(dst, src)); 8627 ins_pipe(ialu_reg_mem_alu0); 8628 %} 8629 8630 instruct mulL_mem_imm(rRegL dst, memory src, immL32 imm, rFlagsReg cr) 8631 %{ 8632 match(Set dst (MulL (LoadL src) imm)); 8633 effect(KILL cr); 8634 8635 ins_cost(300); 8636 format %{ "imulq $dst, $src, $imm\t# long" %} 8637 opcode(0x69); /* 69 /r id */ 8638 ins_encode(REX_reg_mem_wide(dst, src), 8639 OpcSE(imm), reg_mem(dst, src), Con8or32(imm)); 8640 ins_pipe(ialu_reg_mem_alu0); 8641 %} 8642 8643 instruct mulHiL_rReg(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr) 8644 %{ 8645 match(Set dst (MulHiL src rax)); 8646 effect(USE_KILL rax, KILL cr); 8647 8648 ins_cost(300); 8649 format %{ "imulq RDX:RAX, RAX, $src\t# mulhi" %} 8650 opcode(0xF7, 0x5); /* Opcode F7 /5 */ 8651 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src)); 8652 ins_pipe(ialu_reg_reg_alu0); 8653 %} 8654 8655 instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div, 8656 rFlagsReg cr) 8657 %{ 8658 match(Set rax (DivI rax div)); 8659 effect(KILL rdx, KILL cr); 8660 8661 ins_cost(30*100+10*100); // XXX 8662 format %{ "cmpl rax, 0x80000000\t# idiv\n\t" 8663 "jne,s normal\n\t" 8664 "xorl rdx, rdx\n\t" 8665 "cmpl $div, -1\n\t" 8666 "je,s done\n" 8667 "normal: cdql\n\t" 8668 "idivl $div\n" 8669 "done:" %} 8670 opcode(0xF7, 0x7); /* Opcode F7 /7 */ 8671 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div)); 8672 ins_pipe(ialu_reg_reg_alu0); 8673 %} 8674 8675 instruct divL_rReg(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div, 8676 rFlagsReg cr) 8677 %{ 8678 match(Set rax (DivL rax div)); 8679 effect(KILL rdx, KILL cr); 8680 8681 ins_cost(30*100+10*100); // XXX 8682 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t" 8683 "cmpq rax, 
rdx\n\t" 8684 "jne,s normal\n\t" 8685 "xorl rdx, rdx\n\t" 8686 "cmpq $div, -1\n\t" 8687 "je,s done\n" 8688 "normal: cdqq\n\t" 8689 "idivq $div\n" 8690 "done:" %} 8691 opcode(0xF7, 0x7); /* Opcode F7 /7 */ 8692 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div)); 8693 ins_pipe(ialu_reg_reg_alu0); 8694 %} 8695 8696 // Integer DIVMOD with Register, both quotient and mod results 8697 instruct divModI_rReg_divmod(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div, 8698 rFlagsReg cr) 8699 %{ 8700 match(DivModI rax div); 8701 effect(KILL cr); 8702 8703 ins_cost(30*100+10*100); // XXX 8704 format %{ "cmpl rax, 0x80000000\t# idiv\n\t" 8705 "jne,s normal\n\t" 8706 "xorl rdx, rdx\n\t" 8707 "cmpl $div, -1\n\t" 8708 "je,s done\n" 8709 "normal: cdql\n\t" 8710 "idivl $div\n" 8711 "done:" %} 8712 opcode(0xF7, 0x7); /* Opcode F7 /7 */ 8713 ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div)); 8714 ins_pipe(pipe_slow); 8715 %} 8716 8717 // Long DIVMOD with Register, both quotient and mod results 8718 instruct divModL_rReg_divmod(rax_RegL rax, rdx_RegL rdx, no_rax_rdx_RegL div, 8719 rFlagsReg cr) 8720 %{ 8721 match(DivModL rax div); 8722 effect(KILL cr); 8723 8724 ins_cost(30*100+10*100); // XXX 8725 format %{ "movq rdx, 0x8000000000000000\t# ldiv\n\t" 8726 "cmpq rax, rdx\n\t" 8727 "jne,s normal\n\t" 8728 "xorl rdx, rdx\n\t" 8729 "cmpq $div, -1\n\t" 8730 "je,s done\n" 8731 "normal: cdqq\n\t" 8732 "idivq $div\n" 8733 "done:" %} 8734 opcode(0xF7, 0x7); /* Opcode F7 /7 */ 8735 ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div)); 8736 ins_pipe(pipe_slow); 8737 %} 8738 8739 //----------- DivL-By-Constant-Expansions-------------------------------------- 8740 // DivI cases are handled by the compiler 8741 8742 // Magic constant, reciprocal of 10 8743 instruct loadConL_0x6666666666666667(rRegL dst) 8744 %{ 8745 effect(DEF dst); 8746 8747 format %{ "movq $dst, #0x666666666666667\t# Used in div-by-10" %} 8748 ins_encode(load_immL(dst, 0x6666666666666667)); 8749 
ins_pipe(ialu_reg); 8750 %} 8751 8752 instruct mul_hi(rdx_RegL dst, no_rax_RegL src, rax_RegL rax, rFlagsReg cr) 8753 %{ 8754 effect(DEF dst, USE src, USE_KILL rax, KILL cr); 8755 8756 format %{ "imulq rdx:rax, rax, $src\t# Used in div-by-10" %} 8757 opcode(0xF7, 0x5); /* Opcode F7 /5 */ 8758 ins_encode(REX_reg_wide(src), OpcP, reg_opc(src)); 8759 ins_pipe(ialu_reg_reg_alu0); 8760 %} 8761 8762 instruct sarL_rReg_63(rRegL dst, rFlagsReg cr) 8763 %{ 8764 effect(USE_DEF dst, KILL cr); 8765 8766 format %{ "sarq $dst, #63\t# Used in div-by-10" %} 8767 opcode(0xC1, 0x7); /* C1 /7 ib */ 8768 ins_encode(reg_opc_imm_wide(dst, 0x3F)); 8769 ins_pipe(ialu_reg); 8770 %} 8771 8772 instruct sarL_rReg_2(rRegL dst, rFlagsReg cr) 8773 %{ 8774 effect(USE_DEF dst, KILL cr); 8775 8776 format %{ "sarq $dst, #2\t# Used in div-by-10" %} 8777 opcode(0xC1, 0x7); /* C1 /7 ib */ 8778 ins_encode(reg_opc_imm_wide(dst, 0x2)); 8779 ins_pipe(ialu_reg); 8780 %} 8781 8782 instruct divL_10(rdx_RegL dst, no_rax_RegL src, immL10 div) 8783 %{ 8784 match(Set dst (DivL src div)); 8785 8786 ins_cost((5+8)*100); 8787 expand %{ 8788 rax_RegL rax; // Killed temp 8789 rFlagsReg cr; // Killed 8790 loadConL_0x6666666666666667(rax); // movq rax, 0x6666666666666667 8791 mul_hi(dst, src, rax, cr); // mulq rdx:rax <= rax * $src 8792 sarL_rReg_63(src, cr); // sarq src, 63 8793 sarL_rReg_2(dst, cr); // sarq rdx, 2 8794 subL_rReg(dst, src, cr); // subl rdx, src 8795 %} 8796 %} 8797 8798 //----------------------------------------------------------------------------- 8799 8800 instruct modI_rReg(rdx_RegI rdx, rax_RegI rax, no_rax_rdx_RegI div, 8801 rFlagsReg cr) 8802 %{ 8803 match(Set rdx (ModI rax div)); 8804 effect(KILL rax, KILL cr); 8805 8806 ins_cost(300); // XXX 8807 format %{ "cmpl rax, 0x80000000\t# irem\n\t" 8808 "jne,s normal\n\t" 8809 "xorl rdx, rdx\n\t" 8810 "cmpl $div, -1\n\t" 8811 "je,s done\n" 8812 "normal: cdql\n\t" 8813 "idivl $div\n" 8814 "done:" %} 8815 opcode(0xF7, 0x7); /* Opcode F7 /7 */ 8816 
  ins_encode(cdql_enc(div), REX_reg(div), OpcP, reg_opc(div));
  ins_pipe(ialu_reg_reg_alu0);
%}

// Long remainder.  The format below special-cases min_jlong % -1 (result
// forced to 0 via xorl rdx, rdx) because idivq would otherwise raise an
// overflow fault (#DE) for that operand pair.
instruct modL_rReg(rdx_RegL rdx, rax_RegL rax, no_rax_rdx_RegL div,
                   rFlagsReg cr)
%{
  match(Set rdx (ModL rax div));
  effect(KILL rax, KILL cr);

  ins_cost(300); // XXX
  format %{ "movq rdx, 0x8000000000000000\t# lrem\n\t"
            "cmpq rax, rdx\n\t"
            "jne,s normal\n\t"
            "xorl rdx, rdx\n\t"
            "cmpq $div, -1\n\t"
            "je,s done\n"
            "normal: cdqq\n\t"
            "idivq $div\n"
            "done:" %}
  opcode(0xF7, 0x7); /* Opcode F7 /7 */
  ins_encode(cdqq_enc(div), REX_reg_wide(div), OpcP, reg_opc(div));
  ins_pipe(ialu_reg_reg_alu0);
%}

// Integer Shift Instructions
// Shift Left by one
instruct salI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (LShiftI dst shift));
  effect(KILL cr);

  format %{ "sall $dst, $shift" %}
  opcode(0xD1, 0x4); /* D1 /4 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Shift Left by one (read-modify-write directly in memory)
instruct salI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "sall $dst, $shift\t" %}
  opcode(0xD1, 0x4); /* D1 /4 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}

// Shift Left by 8-bit immediate
instruct salI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (LShiftI dst shift));
  effect(KILL cr);

  format %{ "sall $dst, $shift" %}
  opcode(0xC1, 0x4); /* C1 /4 ib */
  ins_encode(reg_opc_imm(dst, shift));
  ins_pipe(ialu_reg);
%}

// Shift Left by 8-bit immediate
instruct salI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "sall $dst, $shift" %}
  opcode(0xC1, 0x4); /* C1 /4 ib */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Shift Left by variable (shift count is implicitly in CL: rcx_RegI operand)
instruct salI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (LShiftI dst shift));
  effect(KILL cr);

  format %{ "sall $dst, $shift" %}
  opcode(0xD3, 0x4); /* D3 /4 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Shift Left by variable
instruct salI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (LShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "sall $dst, $shift" %}
  opcode(0xD3, 0x4); /* D3 /4 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}

// Arithmetic shift right by one
instruct sarI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (RShiftI dst shift));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xD1, 0x7); /* D1 /7 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Arithmetic shift right by one
instruct sarI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xD1, 0x7); /* D1 /7 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}

// Arithmetic Shift Right by 8-bit immediate
instruct sarI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (RShiftI dst shift));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xC1, 0x7); /* C1 /7 ib */
  ins_encode(reg_opc_imm(dst, shift));
  // NOTE(review): mem-class pipe on a register-form instruct; the sibling
  // shift instructs use ialu_reg here -- confirm this is intentional.
  ins_pipe(ialu_mem_imm);
%}

// Arithmetic Shift Right by 8-bit immediate
instruct sarI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xC1, 0x7); /* C1 /7 ib */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Arithmetic Shift Right by variable
instruct sarI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (RShiftI dst shift));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xD3, 0x7); /* D3 /7 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Arithmetic Shift Right by variable
instruct sarI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (RShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "sarl $dst, $shift" %}
  opcode(0xD3, 0x7); /* D3 /7 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}

// Logical shift right by one
instruct shrI_rReg_1(rRegI dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (URShiftI dst shift));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xD1, 0x5); /* D1 /5 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Logical shift right by one
instruct shrI_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xD1, 0x5); /* D1 /5 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}

// Logical Shift Right by 8-bit immediate
instruct shrI_rReg_imm(rRegI dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (URShiftI dst shift));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xC1, 0x5); /* C1 /5 ib */
  ins_encode(reg_opc_imm(dst, shift));
  ins_pipe(ialu_reg);
%}

// Logical Shift Right by 8-bit immediate
instruct shrI_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xC1, 0x5); /* C1 /5 ib */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Logical Shift Right by variable
instruct shrI_rReg_CL(rRegI dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (URShiftI dst shift));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xD3, 0x5); /* D3 /5 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Logical Shift Right by variable
instruct shrI_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (URShiftI (LoadI dst) shift)));
  effect(KILL cr);

  format %{ "shrl $dst, $shift" %}
  opcode(0xD3, 0x5); /* D3 /5 */
  ins_encode(REX_mem(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}

// Long Shift Instructions
// Shift Left by one
instruct salL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (LShiftL dst shift));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xD1, 0x4); /* D1 /4 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Shift Left by one
instruct salL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xD1, 0x4); /* D1 /4 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}

// Shift Left by 8-bit immediate
instruct salL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (LShiftL dst shift));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xC1, 0x4); /* C1 /4 ib */
  ins_encode(reg_opc_imm_wide(dst, shift));
  ins_pipe(ialu_reg);
%}

// Shift Left by 8-bit immediate
instruct salL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xC1, 0x4); /* C1 /4 ib */
  ins_encode(REX_mem_wide(dst), OpcP,
             RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Shift Left by variable (shift count implicitly in CL: rcx_RegI operand)
instruct salL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (LShiftL dst shift));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xD3, 0x4); /* D3 /4 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Shift Left by variable
instruct salL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (LShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "salq $dst, $shift" %}
  opcode(0xD3, 0x4); /* D3 /4 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}

// Arithmetic shift right by one
instruct sarL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (RShiftL dst shift));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xD1, 0x7); /* D1 /7 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Arithmetic shift right by one
instruct sarL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xD1, 0x7); /* D1 /7 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}

// Arithmetic Shift Right by 8-bit immediate
instruct sarL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (RShiftL dst shift));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xC1, 0x7); /* C1 /7 ib */
  ins_encode(reg_opc_imm_wide(dst, shift));
  // NOTE(review): mem-class pipe on a register-form instruct (siblings use
  // ialu_reg) -- confirm this is intentional.
  ins_pipe(ialu_mem_imm);
%}

// Arithmetic Shift Right by 8-bit immediate
instruct sarL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xC1, 0x7); /* C1 /7 ib */
  ins_encode(REX_mem_wide(dst), OpcP,
             RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Arithmetic Shift Right by variable
instruct sarL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (RShiftL dst shift));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xD3, 0x7); /* D3 /7 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Arithmetic Shift Right by variable
instruct sarL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (RShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "sarq $dst, $shift" %}
  opcode(0xD3, 0x7); /* D3 /7 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}

// Logical shift right by one
instruct shrL_rReg_1(rRegL dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (URShiftL dst shift));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xD1, 0x5); /* D1 /5 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

// Logical shift right by one
instruct shrL_mem_1(memory dst, immI1 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xD1, 0x5); /* D1 /5 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_imm);
%}

// Logical Shift Right by 8-bit immediate
instruct shrL_rReg_imm(rRegL dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (URShiftL dst shift));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xC1, 0x5); /* C1 /5 ib */
  ins_encode(reg_opc_imm_wide(dst, shift));
  ins_pipe(ialu_reg);
%}


// Logical Shift Right by 8-bit immediate
instruct shrL_mem_imm(memory dst, immI8 shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xC1, 0x5); /* C1 /5 ib */
  ins_encode(REX_mem_wide(dst), OpcP,
             RM_opc_mem(secondary, dst), Con8or32(shift));
  ins_pipe(ialu_mem_imm);
%}

// Logical Shift Right by variable
instruct shrL_rReg_CL(rRegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (URShiftL dst shift));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xD3, 0x5); /* D3 /5 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}

// Logical Shift Right by variable
instruct shrL_mem_CL(memory dst, rcx_RegI shift, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (URShiftL (LoadL dst) shift)));
  effect(KILL cr);

  format %{ "shrq $dst, $shift" %}
  opcode(0xD3, 0x5); /* D3 /5 */
  ins_encode(REX_mem_wide(dst), OpcP, RM_opc_mem(secondary, dst));
  ins_pipe(ialu_mem_reg);
%}

// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
instruct i2b(rRegI dst, rRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "movsbl $dst, $src\t# i2b" %}
  opcode(0x0F, 0xBE);
  ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler for the i2s bytecode.
instruct i2s(rRegI dst, rRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "movswl $dst, $src\t# i2s" %}
  opcode(0x0F, 0xBF);
  ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// ROL/ROR instructions

// ROL expand: building blocks only (no match rule); instantiated by the
// rotate instructs below via their expand %{ %} sections.
instruct rolI_rReg_imm1(rRegI dst, rFlagsReg cr) %{
  effect(KILL cr, USE_DEF dst);

  format %{ "roll $dst" %}
  opcode(0xD1, 0x0); /* Opcode D1 /0 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

instruct rolI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr) %{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "roll $dst, $shift" %}
  opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
  ins_encode( reg_opc_imm(dst, shift) );
  ins_pipe(ialu_reg);
%}

instruct rolI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "roll $dst, $shift" %}
  opcode(0xD3, 0x0); /* Opcode D3 /0 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}
// end of ROL expand

// Rotate Left by one
instruct rolI_rReg_i1(rRegI dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  expand %{
    rolI_rReg_imm1(dst, cr);
  %}
%}

// Rotate Left by 8-bit immediate.
// Predicate: only matches when left + right shift amounts sum to 0 mod 32,
// i.e. the (x << n) | (x >>> (32 - n)) idiom really is a 32-bit rotate.
instruct rolI_rReg_i8(rRegI dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  expand %{
    rolI_rReg_imm8(dst, lshift, cr);
  %}
%}

// Rotate Left by variable
instruct rolI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI zero shift))));

  expand %{
    rolI_rReg_CL(dst, shift, cr);
  %}
%}

// Rotate Left by variable
instruct rolI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI dst shift) (URShiftI dst (SubI c32 shift))));

  expand %{
    rolI_rReg_CL(dst, shift, cr);
  %}
%}

// ROR expand
instruct rorI_rReg_imm1(rRegI dst, rFlagsReg cr)
%{
  effect(USE_DEF dst, KILL cr);

  format %{ "rorl $dst" %}
  opcode(0xD1, 0x1); /* D1 /1 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

instruct rorI_rReg_imm8(rRegI dst, immI8 shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rorl $dst, $shift" %}
  opcode(0xC1, 0x1); /* C1 /1 ib */
  ins_encode(reg_opc_imm(dst, shift));
  ins_pipe(ialu_reg);
%}

instruct rorI_rReg_CL(no_rcx_RegI dst, rcx_RegI shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rorl $dst, $shift" %}
  opcode(0xD3, 0x1); /* D3 /1 */
  ins_encode(REX_reg(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}
// end of ROR expand

// Rotate Right by one
instruct rorI_rReg_i1(rRegI dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  expand %{
    rorI_rReg_imm1(dst, cr);
  %}
%}

// Rotate Right by 8-bit immediate
instruct rorI_rReg_i8(rRegI dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  expand %{
    rorI_rReg_imm8(dst, rshift, cr);
  %}
%}

// Rotate Right by variable
instruct rorI_rReg_Var_C0(no_rcx_RegI dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI zero shift))));

  expand %{
    rorI_rReg_CL(dst, shift, cr);
  %}
%}

// Rotate Right by variable
instruct rorI_rReg_Var_C32(no_rcx_RegI dst, rcx_RegI shift, immI_32 c32, rFlagsReg cr)
%{
  match(Set dst (OrI (URShiftI dst shift) (LShiftI dst (SubI c32 shift))));

  expand %{
    rorI_rReg_CL(dst, shift, cr);
  %}
%}

// for long rotate
// ROL expand
instruct rolL_rReg_imm1(rRegL dst, rFlagsReg cr) %{
  effect(USE_DEF dst, KILL cr);

  format %{ "rolq $dst" %}
  opcode(0xD1, 0x0); /* Opcode D1 /0 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

instruct rolL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr) %{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rolq $dst, $shift" %}
  opcode(0xC1, 0x0); /* Opcode C1 /0 ib */
  ins_encode( reg_opc_imm_wide(dst, shift) );
  ins_pipe(ialu_reg);
%}

instruct rolL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rolq $dst, $shift" %}
  opcode(0xD3, 0x0); /* Opcode D3 /0 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}
// end of ROL expand

// Rotate Left by one
instruct rolL_rReg_i1(rRegL dst, immI1 lshift, immI_M1 rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  expand %{
    rolL_rReg_imm1(dst, cr);
  %}
%}

// Rotate Left by 8-bit immediate.
// Predicate: shift amounts must sum to 0 mod 64 for the or-of-shifts idiom
// to be a true 64-bit rotate.
instruct rolL_rReg_i8(rRegL dst, immI8 lshift, immI8 rshift, rFlagsReg cr)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  expand %{
    rolL_rReg_imm8(dst, lshift, cr);
  %}
%}

// Rotate Left by variable
instruct rolL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI zero shift))));

  expand %{
    rolL_rReg_CL(dst, shift, cr);
  %}
%}

// Rotate Left by variable
instruct rolL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL dst shift) (URShiftL dst (SubI c64 shift))));

  expand %{
    rolL_rReg_CL(dst, shift, cr);
  %}
%}

// ROR expand
instruct rorL_rReg_imm1(rRegL dst, rFlagsReg cr)
%{
  effect(USE_DEF dst, KILL cr);

  format %{ "rorq $dst" %}
  opcode(0xD1, 0x1); /* D1 /1 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg);
%}

instruct rorL_rReg_imm8(rRegL dst, immI8 shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rorq $dst, $shift" %}
  opcode(0xC1, 0x1); /* C1 /1 ib */
  ins_encode(reg_opc_imm_wide(dst, shift));
  ins_pipe(ialu_reg);
%}

instruct rorL_rReg_CL(no_rcx_RegL dst, rcx_RegI shift, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE shift, KILL cr);

  format %{ "rorq $dst, $shift" %}
  opcode(0xD3, 0x1); /* D3 /1 */
  ins_encode(REX_reg_wide(dst), OpcP, reg_opc(dst));
  ins_pipe(ialu_reg_reg);
%}
// end of ROR expand

// Rotate Right by one
instruct rorL_rReg_i1(rRegL dst, immI1 rshift, immI_M1 lshift, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  expand %{
    rorL_rReg_imm1(dst, cr);
  %}
%}

// Rotate Right by 8-bit immediate
instruct rorL_rReg_i8(rRegL dst, immI8 rshift, immI8 lshift, rFlagsReg cr)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  expand %{
    rorL_rReg_imm8(dst, rshift, cr);
  %}
%}

// Rotate Right by variable
instruct rorL_rReg_Var_C0(no_rcx_RegL dst, rcx_RegI shift, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI zero shift))));

  expand %{
    rorL_rReg_CL(dst, shift, cr);
  %}
%}

// Rotate Right by variable
instruct rorL_rReg_Var_C64(no_rcx_RegL dst, rcx_RegI shift, immI_64 c64, rFlagsReg cr)
%{
  match(Set dst (OrL (URShiftL dst shift) (LShiftL dst (SubI c64 shift))));

  expand %{
    rorL_rReg_CL(dst, shift, cr);
  %}
%}

// Logical Instructions

// Integer Logical Instructions

// And Instructions
// And Register with Register
instruct andI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (AndI dst src));
  effect(KILL cr);

  format %{ "andl $dst, $src\t# int" %}
  opcode(0x23);
  ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// And Register with Immediate 255
// Mask to 0xFF implemented as a zero-extending byte move; no flags killed,
// so no rFlagsReg effect is declared.
instruct andI_rReg_imm255(rRegI dst, immI_255 src)
%{
  match(Set dst (AndI dst src));

  format %{ "movzbl $dst, $dst\t# int & 0xFF" %}
  opcode(0x0F, 0xB6);
  ins_encode(REX_reg_breg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate 255 and promote to long
instruct andI2L_rReg_imm255(rRegL dst, rRegI src, immI_255 mask)
%{
  match(Set dst (ConvI2L (AndI src mask)));

  format %{ "movzbl $dst, $src\t# int & 0xFF -> long" %}
  opcode(0x0F, 0xB6);
  ins_encode(REX_reg_breg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate 65535
instruct andI_rReg_imm65535(rRegI dst, immI_65535 src)
%{
  match(Set dst (AndI dst src));

  format %{ "movzwl $dst, $dst\t# int & 0xFFFF" %}
  opcode(0x0F, 0xB7);
  ins_encode(REX_reg_reg(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate 65535 and promote to long
instruct andI2L_rReg_imm65535(rRegL dst, rRegI src, immI_65535 mask)
%{
  match(Set dst (ConvI2L (AndI src mask)));

  format %{ "movzwl $dst, $src\t# int & 0xFFFF -> long" %}
  opcode(0x0F, 0xB7);
  ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate
instruct andI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
  match(Set dst (AndI dst src));
  effect(KILL cr);

  format %{ "andl $dst, $src\t# int" %}
  opcode(0x81, 0x04); /* Opcode 81 /4 */
  ins_encode(OpcSErm(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// And Register with Memory
instruct andI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
%{
  match(Set dst (AndI dst (LoadI src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "andl $dst, $src\t# int" %}
  opcode(0x23);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// And Memory with Register
instruct andI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (AndI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "andl $dst, $src\t# int" %}
  opcode(0x21); /* Opcode 21 /r */
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// And Memory with Immediate
instruct andI_mem_imm(memory dst, immI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (AndI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "andl $dst, $src\t# int" %}
  opcode(0x81, 0x4); /* Opcode 81 /4 id */
  ins_encode(REX_mem(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}

// Or Instructions
// Or Register with Register
instruct orI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (OrI dst src));
  effect(KILL cr);

  format %{ "orl $dst, $src\t# int" %}
  opcode(0x0B);
  ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Or Register with Immediate
instruct orI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
  match(Set dst (OrI dst src));
  effect(KILL cr);

  format %{ "orl $dst, $src\t# int" %}
  opcode(0x81, 0x01); /* Opcode 81 /1 id */
  ins_encode(OpcSErm(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// Or Register with Memory
instruct orI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
%{
  match(Set dst (OrI dst (LoadI src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "orl $dst, $src\t# int" %}
  opcode(0x0B);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Or Memory with Register
instruct orI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (OrI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "orl $dst, $src\t# int" %}
  opcode(0x09); /* Opcode 09 /r */
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Or Memory with Immediate
instruct orI_mem_imm(memory dst, immI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (OrI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "orl $dst, $src\t# int" %}
  opcode(0x81, 0x1); /* Opcode 81 /1 id */
  ins_encode(REX_mem(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}

// Xor Instructions
// Xor Register with Register
instruct xorI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (XorI dst src));
  effect(KILL cr);

  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x33);
  ins_encode(REX_reg_reg(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Xor Register with Immediate -1 (emitted as a NOT; flags untouched, so no
// rFlagsReg effect is declared)
instruct xorI_rReg_im1(rRegI dst, immI_M1 imm) %{
  match(Set dst (XorI dst imm));

  format %{ "not $dst" %}
  ins_encode %{
    __ notl($dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Xor Register with Immediate
instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
  match(Set dst (XorI dst src));
  effect(KILL cr);

  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x81, 0x06); /* Opcode 81 /6 id */
  ins_encode(OpcSErm(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// Xor Register with Memory
instruct xorI_rReg_mem(rRegI dst, memory src, rFlagsReg cr)
%{
  match(Set dst (XorI dst (LoadI src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x33);
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Xor Memory with Register
instruct xorI_mem_rReg(memory dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (XorI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x31); /* Opcode 31 /r */
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Xor Memory with Immediate
instruct xorI_mem_imm(memory dst, immI src, rFlagsReg cr)
%{
  match(Set dst (StoreI dst (XorI (LoadI dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "xorl $dst, $src\t# int" %}
  opcode(0x81, 0x6); /* Opcode 81 /6 id */
  ins_encode(REX_mem(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}


// Long Logical Instructions

// And Instructions
// And Register with Register
instruct andL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (AndL dst src));
  effect(KILL cr);

  format %{ "andq $dst, $src\t# long" %}
  opcode(0x23);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// And Register with Immediate 255
instruct andL_rReg_imm255(rRegL dst, immL_255 src)
%{
  match(Set dst (AndL dst src));

  format %{ "movzbq $dst, $dst\t# long & 0xFF" %}
  opcode(0x0F, 0xB6);
  ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate 65535
instruct andL_rReg_imm65535(rRegL dst, immL_65535 src)
%{
  match(Set dst (AndL dst src));

  format %{ "movzwq $dst, $dst\t# long & 0xFFFF" %}
  opcode(0x0F, 0xB7);
  ins_encode(REX_reg_reg_wide(dst, dst), OpcP, OpcS, reg_reg(dst, dst));
  ins_pipe(ialu_reg);
%}

// And Register with Immediate
instruct andL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (AndL dst src));
  effect(KILL cr);

  format %{ "andq $dst, $src\t# long" %}
  opcode(0x81, 0x04); /* Opcode 81 /4 */
  ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// And Register with Memory
instruct andL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
%{
  match(Set dst (AndL dst (LoadL src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "andq $dst, $src\t# long" %}
  opcode(0x23);
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// And Memory with Register
instruct andL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (AndL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "andq $dst, $src\t# long" %}
  opcode(0x21); /* Opcode 21 /r */
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// And Memory with Immediate
instruct andL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (AndL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "andq $dst, $src\t# long" %}
  opcode(0x81, 0x4); /* Opcode 81 /4 id */
  ins_encode(REX_mem_wide(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}

// Or Instructions
// Or Register with Register
instruct orL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (OrL dst src));
  effect(KILL cr);

  format %{ "orq $dst, $src\t# long" %}
  opcode(0x0B);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Use any_RegP to match R15 (TLS register) without spilling.
instruct orL_rReg_castP2X(rRegL dst, any_RegP src, rFlagsReg cr) %{
  match(Set dst (OrL dst (CastP2X src)));
  effect(KILL cr);

  format %{ "orq $dst, $src\t# long" %}
  opcode(0x0B);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}


// Or Register with Immediate
instruct orL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (OrL dst src));
  effect(KILL cr);

  format %{ "orq $dst, $src\t# long" %}
  opcode(0x81, 0x01); /* Opcode 81 /1 id */
  ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// Or Register with Memory
instruct orL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
%{
  match(Set dst (OrL dst (LoadL src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "orq $dst, $src\t# long" %}
  opcode(0x0B);
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Or Memory with Register
instruct orL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (OrL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "orq $dst, $src\t# long" %}
  opcode(0x09); /* Opcode 09 /r */
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Or Memory with Immediate
instruct orL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (OrL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "orq $dst, $src\t# long" %}
  opcode(0x81, 0x1); /* Opcode 81 /1 id */
  ins_encode(REX_mem_wide(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}

// Xor Instructions
// Xor Register with Register
instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (XorL dst src));
  effect(KILL cr);

  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x33);
  ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Xor Register with Immediate -1 (emitted as a NOT; flags untouched, so no
// rFlagsReg effect is declared)
instruct xorL_rReg_im1(rRegL dst, immL_M1 imm) %{
  match(Set dst (XorL dst imm));

  format %{ "notq $dst" %}
  ins_encode %{
    __ notq($dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Xor Register with Immediate
instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (XorL dst src));
  effect(KILL cr);

  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x81, 0x06); /* Opcode 81 /6 id */
  ins_encode(OpcSErm_wide(dst, src), Con8or32(src));
  ins_pipe(ialu_reg);
%}

// Xor Register with Memory
instruct xorL_rReg_mem(rRegL dst, memory src, rFlagsReg cr)
%{
  match(Set dst (XorL dst (LoadL src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x33);
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Xor Memory with Register
instruct xorL_mem_rReg(memory dst, rRegL src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (XorL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(150);
  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x31); /* Opcode 31 /r */
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Xor Memory with Immediate
instruct xorL_mem_imm(memory dst, immL32 src, rFlagsReg cr)
%{
  match(Set dst (StoreL dst (XorL (LoadL dst) src)));
  effect(KILL cr);

  ins_cost(125);
  format %{ "xorq $dst, $src\t# long" %}
  opcode(0x81, 0x6); /* Opcode 81 /6 id */
  ins_encode(REX_mem_wide(dst), OpcSE(src),
             RM_opc_mem(secondary, dst), Con8or32(src));
  ins_pipe(ialu_mem_imm);
%}

// Convert Int to Boolean: dst = (src != 0) ? 1 : 0
instruct convI2B(rRegI dst, rRegI src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{ "testl $src, $src\t# ci2b\n\t"
            "setnz $dst\n\t"
            "movzbl $dst, $dst" %}
  ins_encode(REX_reg_reg(src, src), opc_reg_reg(0x85, src, src), // testl
             setNZ_reg(dst),
             REX_reg_breg(dst, dst), // movzbl
             Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
  ins_pipe(pipe_slow); // XXX
%}

// Convert Pointer to Boolean: dst = (src != NULL) ? 1 : 0
instruct convP2B(rRegI dst, rRegP src, rFlagsReg cr)
%{
  match(Set dst (Conv2B src));
  effect(KILL cr);

  format %{ "testq $src, $src\t# cp2b\n\t"
            "setnz $dst\n\t"
            "movzbl $dst, $dst" %}
  ins_encode(REX_reg_reg_wide(src, src), opc_reg_reg(0x85, src, src), // testq
             setNZ_reg(dst),
             REX_reg_breg(dst, dst), // movzbl
             Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst));
  ins_pipe(pipe_slow); // XXX
%}

// dst = (p < q) ? -1 : 0, built from setlt + zero-extend + negate.
instruct cmpLTMask(rRegI dst, rRegI p, rRegI q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(400); // XXX
  format %{ "cmpl $p, $q\t# cmpLTMask\n\t"
            "setlt $dst\n\t"
            "movzbl $dst, $dst\n\t"
            "negl $dst" %}
  ins_encode(REX_reg_reg(p, q), opc_reg_reg(0x3B, p, q), // cmpl
             setLT_reg(dst),
             REX_reg_breg(dst, dst), // movzbl
             Opcode(0x0F), Opcode(0xB6), reg_reg(dst, dst),
             neg_reg(dst)); // negl
  ins_pipe(pipe_slow);
%}

instruct cmpLTMask0(rRegI dst, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask dst zero));
  effect(KILL cr);

ins_cost(100); // XXX 10147 format %{ "sarl $dst, #31\t# cmpLTMask0" %} 10148 opcode(0xC1, 0x7); /* C1 /7 ib */ 10149 ins_encode(reg_opc_imm(dst, 0x1F)); 10150 ins_pipe(ialu_reg); 10151 %} 10152 10153 10154 instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rRegI tmp, rFlagsReg cr) 10155 %{ 10156 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q))); 10157 effect(TEMP tmp, KILL cr); 10158 10159 ins_cost(400); // XXX 10160 format %{ "subl $p, $q\t# cadd_cmpLTMask1\n\t" 10161 "sbbl $tmp, $tmp\n\t" 10162 "andl $tmp, $y\n\t" 10163 "addl $p, $tmp" %} 10164 ins_encode %{ 10165 Register Rp = $p$$Register; 10166 Register Rq = $q$$Register; 10167 Register Ry = $y$$Register; 10168 Register Rt = $tmp$$Register; 10169 __ subl(Rp, Rq); 10170 __ sbbl(Rt, Rt); 10171 __ andl(Rt, Ry); 10172 __ addl(Rp, Rt); 10173 %} 10174 ins_pipe(pipe_cmplt); 10175 %} 10176 10177 //---------- FP Instructions------------------------------------------------ 10178 10179 instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2) 10180 %{ 10181 match(Set cr (CmpF src1 src2)); 10182 10183 ins_cost(145); 10184 format %{ "ucomiss $src1, $src2\n\t" 10185 "jnp,s exit\n\t" 10186 "pushfq\t# saw NaN, set CF\n\t" 10187 "andq [rsp], #0xffffff2b\n\t" 10188 "popfq\n" 10189 "exit: nop\t# avoid branch to branch" %} 10190 opcode(0x0F, 0x2E); 10191 ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2), 10192 cmpfp_fixup); 10193 ins_pipe(pipe_slow); 10194 %} 10195 10196 instruct cmpF_cc_reg_CF(rFlagsRegUCF cr, regF src1, regF src2) %{ 10197 match(Set cr (CmpF src1 src2)); 10198 10199 ins_cost(145); 10200 format %{ "ucomiss $src1, $src2" %} 10201 ins_encode %{ 10202 __ ucomiss($src1$$XMMRegister, $src2$$XMMRegister); 10203 %} 10204 ins_pipe(pipe_slow); 10205 %} 10206 10207 instruct cmpF_cc_mem(rFlagsRegU cr, regF src1, memory src2) 10208 %{ 10209 match(Set cr (CmpF src1 (LoadF src2))); 10210 10211 ins_cost(145); 10212 format %{ "ucomiss $src1, $src2\n\t" 10213 "jnp,s exit\n\t" 10214 "pushfq\t# saw NaN, 
set CF\n\t" 10215 "andq [rsp], #0xffffff2b\n\t" 10216 "popfq\n" 10217 "exit: nop\t# avoid branch to branch" %} 10218 opcode(0x0F, 0x2E); 10219 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2), 10220 cmpfp_fixup); 10221 ins_pipe(pipe_slow); 10222 %} 10223 10224 instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{ 10225 match(Set cr (CmpF src1 (LoadF src2))); 10226 10227 ins_cost(100); 10228 format %{ "ucomiss $src1, $src2" %} 10229 opcode(0x0F, 0x2E); 10230 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2)); 10231 ins_pipe(pipe_slow); 10232 %} 10233 10234 instruct cmpF_cc_imm(rFlagsRegU cr, regF src, immF con) %{ 10235 match(Set cr (CmpF src con)); 10236 10237 ins_cost(145); 10238 format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t" 10239 "jnp,s exit\n\t" 10240 "pushfq\t# saw NaN, set CF\n\t" 10241 "andq [rsp], #0xffffff2b\n\t" 10242 "popfq\n" 10243 "exit: nop\t# avoid branch to branch" %} 10244 ins_encode %{ 10245 __ ucomiss($src$$XMMRegister, $constantaddress($con)); 10246 emit_cmpfp_fixup(_masm); 10247 %} 10248 ins_pipe(pipe_slow); 10249 %} 10250 10251 instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src, immF con) %{ 10252 match(Set cr (CmpF src con)); 10253 ins_cost(100); 10254 format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con" %} 10255 ins_encode %{ 10256 __ ucomiss($src$$XMMRegister, $constantaddress($con)); 10257 %} 10258 ins_pipe(pipe_slow); 10259 %} 10260 10261 instruct cmpD_cc_reg(rFlagsRegU cr, regD src1, regD src2) 10262 %{ 10263 match(Set cr (CmpD src1 src2)); 10264 10265 ins_cost(145); 10266 format %{ "ucomisd $src1, $src2\n\t" 10267 "jnp,s exit\n\t" 10268 "pushfq\t# saw NaN, set CF\n\t" 10269 "andq [rsp], #0xffffff2b\n\t" 10270 "popfq\n" 10271 "exit: nop\t# avoid branch to branch" %} 10272 opcode(0x66, 0x0F, 0x2E); 10273 ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2), 10274 cmpfp_fixup); 10275 
ins_pipe(pipe_slow); 10276 %} 10277 10278 instruct cmpD_cc_reg_CF(rFlagsRegUCF cr, regD src1, regD src2) %{ 10279 match(Set cr (CmpD src1 src2)); 10280 10281 ins_cost(100); 10282 format %{ "ucomisd $src1, $src2 test" %} 10283 ins_encode %{ 10284 __ ucomisd($src1$$XMMRegister, $src2$$XMMRegister); 10285 %} 10286 ins_pipe(pipe_slow); 10287 %} 10288 10289 instruct cmpD_cc_mem(rFlagsRegU cr, regD src1, memory src2) 10290 %{ 10291 match(Set cr (CmpD src1 (LoadD src2))); 10292 10293 ins_cost(145); 10294 format %{ "ucomisd $src1, $src2\n\t" 10295 "jnp,s exit\n\t" 10296 "pushfq\t# saw NaN, set CF\n\t" 10297 "andq [rsp], #0xffffff2b\n\t" 10298 "popfq\n" 10299 "exit: nop\t# avoid branch to branch" %} 10300 opcode(0x66, 0x0F, 0x2E); 10301 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2), 10302 cmpfp_fixup); 10303 ins_pipe(pipe_slow); 10304 %} 10305 10306 instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{ 10307 match(Set cr (CmpD src1 (LoadD src2))); 10308 10309 ins_cost(100); 10310 format %{ "ucomisd $src1, $src2" %} 10311 opcode(0x66, 0x0F, 0x2E); 10312 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2)); 10313 ins_pipe(pipe_slow); 10314 %} 10315 10316 instruct cmpD_cc_imm(rFlagsRegU cr, regD src, immD con) %{ 10317 match(Set cr (CmpD src con)); 10318 10319 ins_cost(145); 10320 format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t" 10321 "jnp,s exit\n\t" 10322 "pushfq\t# saw NaN, set CF\n\t" 10323 "andq [rsp], #0xffffff2b\n\t" 10324 "popfq\n" 10325 "exit: nop\t# avoid branch to branch" %} 10326 ins_encode %{ 10327 __ ucomisd($src$$XMMRegister, $constantaddress($con)); 10328 emit_cmpfp_fixup(_masm); 10329 %} 10330 ins_pipe(pipe_slow); 10331 %} 10332 10333 instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src, immD con) %{ 10334 match(Set cr (CmpD src con)); 10335 ins_cost(100); 10336 format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con" %} 10337 
ins_encode %{ 10338 __ ucomisd($src$$XMMRegister, $constantaddress($con)); 10339 %} 10340 ins_pipe(pipe_slow); 10341 %} 10342 10343 // Compare into -1,0,1 10344 instruct cmpF_reg(rRegI dst, regF src1, regF src2, rFlagsReg cr) 10345 %{ 10346 match(Set dst (CmpF3 src1 src2)); 10347 effect(KILL cr); 10348 10349 ins_cost(275); 10350 format %{ "ucomiss $src1, $src2\n\t" 10351 "movl $dst, #-1\n\t" 10352 "jp,s done\n\t" 10353 "jb,s done\n\t" 10354 "setne $dst\n\t" 10355 "movzbl $dst, $dst\n" 10356 "done:" %} 10357 10358 opcode(0x0F, 0x2E); 10359 ins_encode(REX_reg_reg(src1, src2), OpcP, OpcS, reg_reg(src1, src2), 10360 cmpfp3(dst)); 10361 ins_pipe(pipe_slow); 10362 %} 10363 10364 // Compare into -1,0,1 10365 instruct cmpF_mem(rRegI dst, regF src1, memory src2, rFlagsReg cr) 10366 %{ 10367 match(Set dst (CmpF3 src1 (LoadF src2))); 10368 effect(KILL cr); 10369 10370 ins_cost(275); 10371 format %{ "ucomiss $src1, $src2\n\t" 10372 "movl $dst, #-1\n\t" 10373 "jp,s done\n\t" 10374 "jb,s done\n\t" 10375 "setne $dst\n\t" 10376 "movzbl $dst, $dst\n" 10377 "done:" %} 10378 10379 opcode(0x0F, 0x2E); 10380 ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, reg_mem(src1, src2), 10381 cmpfp3(dst)); 10382 ins_pipe(pipe_slow); 10383 %} 10384 10385 // Compare into -1,0,1 10386 instruct cmpF_imm(rRegI dst, regF src, immF con, rFlagsReg cr) %{ 10387 match(Set dst (CmpF3 src con)); 10388 effect(KILL cr); 10389 10390 ins_cost(275); 10391 format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t" 10392 "movl $dst, #-1\n\t" 10393 "jp,s done\n\t" 10394 "jb,s done\n\t" 10395 "setne $dst\n\t" 10396 "movzbl $dst, $dst\n" 10397 "done:" %} 10398 ins_encode %{ 10399 Label L_done; 10400 Register Rdst = $dst$$Register; 10401 __ ucomiss($src$$XMMRegister, $constantaddress($con)); 10402 __ movl(Rdst, -1); 10403 __ jcc(Assembler::parity, L_done); 10404 __ jcc(Assembler::below, L_done); 10405 __ setb(Assembler::notEqual, Rdst); 10406 __ movzbl(Rdst, Rdst); 10407 __ bind(L_done); 
10408 %} 10409 ins_pipe(pipe_slow); 10410 %} 10411 10412 // Compare into -1,0,1 10413 instruct cmpD_reg(rRegI dst, regD src1, regD src2, rFlagsReg cr) 10414 %{ 10415 match(Set dst (CmpD3 src1 src2)); 10416 effect(KILL cr); 10417 10418 ins_cost(275); 10419 format %{ "ucomisd $src1, $src2\n\t" 10420 "movl $dst, #-1\n\t" 10421 "jp,s done\n\t" 10422 "jb,s done\n\t" 10423 "setne $dst\n\t" 10424 "movzbl $dst, $dst\n" 10425 "done:" %} 10426 10427 opcode(0x66, 0x0F, 0x2E); 10428 ins_encode(OpcP, REX_reg_reg(src1, src2), OpcS, OpcT, reg_reg(src1, src2), 10429 cmpfp3(dst)); 10430 ins_pipe(pipe_slow); 10431 %} 10432 10433 // Compare into -1,0,1 10434 instruct cmpD_mem(rRegI dst, regD src1, memory src2, rFlagsReg cr) 10435 %{ 10436 match(Set dst (CmpD3 src1 (LoadD src2))); 10437 effect(KILL cr); 10438 10439 ins_cost(275); 10440 format %{ "ucomisd $src1, $src2\n\t" 10441 "movl $dst, #-1\n\t" 10442 "jp,s done\n\t" 10443 "jb,s done\n\t" 10444 "setne $dst\n\t" 10445 "movzbl $dst, $dst\n" 10446 "done:" %} 10447 10448 opcode(0x66, 0x0F, 0x2E); 10449 ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, reg_mem(src1, src2), 10450 cmpfp3(dst)); 10451 ins_pipe(pipe_slow); 10452 %} 10453 10454 // Compare into -1,0,1 10455 instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{ 10456 match(Set dst (CmpD3 src con)); 10457 effect(KILL cr); 10458 10459 ins_cost(275); 10460 format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t" 10461 "movl $dst, #-1\n\t" 10462 "jp,s done\n\t" 10463 "jb,s done\n\t" 10464 "setne $dst\n\t" 10465 "movzbl $dst, $dst\n" 10466 "done:" %} 10467 ins_encode %{ 10468 Register Rdst = $dst$$Register; 10469 Label L_done; 10470 __ ucomisd($src$$XMMRegister, $constantaddress($con)); 10471 __ movl(Rdst, -1); 10472 __ jcc(Assembler::parity, L_done); 10473 __ jcc(Assembler::below, L_done); 10474 __ setb(Assembler::notEqual, Rdst); 10475 __ movzbl(Rdst, Rdst); 10476 __ bind(L_done); 10477 %} 10478 ins_pipe(pipe_slow); 10479 %} 10480 
// Scalar SSE float add: ADDSS (F3 0F 58)
instruct addF_reg(regF dst, regF src)
%{
  match(Set dst (AddF dst src));

  format %{ "addss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct addF_mem(regF dst, memory src)
%{
  match(Set dst (AddF dst (LoadF src)));

  format %{ "addss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// Add float constant loaded from the constant table.
instruct addF_imm(regF dst, immF con) %{
  match(Set dst (AddF dst con));
  format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ addss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Scalar SSE double add: ADDSD (F2 0F 58)
instruct addD_reg(regD dst, regD src)
%{
  match(Set dst (AddD dst src));

  format %{ "addsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct addD_mem(regD dst, memory src)
%{
  match(Set dst (AddD dst (LoadD src)));

  format %{ "addsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x58);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct addD_imm(regD dst, immD con) %{
  match(Set dst (AddD dst con));
  format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ addsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Scalar SSE float subtract: SUBSS (F3 0F 5C)
instruct subF_reg(regF dst, regF src)
%{
  match(Set dst (SubF dst src));

  format %{ "subss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct subF_mem(regF dst, memory src)
%{
  match(Set dst (SubF dst (LoadF src)));

  format %{ "subss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct subF_imm(regF dst, immF con) %{
  match(Set dst (SubF dst con));
  format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ subss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Scalar SSE double subtract: SUBSD (F2 0F 5C)
instruct subD_reg(regD dst, regD src)
%{
  match(Set dst (SubD dst src));

  format %{ "subsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct subD_mem(regD dst, memory src)
%{
  match(Set dst (SubD dst (LoadD src)));

  format %{ "subsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5C);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct subD_imm(regD dst, immD con) %{
  match(Set dst (SubD dst con));
  format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ subsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Scalar SSE float multiply: MULSS (F3 0F 59)
instruct mulF_reg(regF dst, regF src)
%{
  match(Set dst (MulF dst src));

  format %{ "mulss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct mulF_mem(regF dst, memory src)
%{
  match(Set dst (MulF dst (LoadF src)));

  format %{ "mulss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct mulF_imm(regF dst, immF con) %{
  match(Set dst (MulF dst con));
  format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ mulss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Scalar SSE double multiply: MULSD (F2 0F 59)
instruct mulD_reg(regD dst, regD src)
%{
  match(Set dst (MulD dst src));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct mulD_mem(regD dst, memory src)
%{
  match(Set dst (MulD dst (LoadD src)));

  format %{ "mulsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x59);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct mulD_imm(regD dst, immD con) %{
  match(Set dst (MulD dst con));
  format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ mulsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Scalar SSE float divide: DIVSS (F3 0F 5E)
instruct divF_reg(regF dst, regF src)
%{
  match(Set dst (DivF dst src));

  format %{ "divss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct divF_mem(regF dst, memory src)
%{
  match(Set dst (DivF dst (LoadF src)));

  format %{ "divss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct divF_imm(regF dst, immF con) %{
  match(Set dst (DivF dst con));
  format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ divss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Scalar SSE double divide: DIVSD (F2 0F 5E)
instruct divD_reg(regD dst, regD src)
%{
  match(Set dst (DivD dst src));

  format %{ "divsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct divD_mem(regD dst, memory src)
%{
  match(Set dst (DivD dst (LoadD src)));

  format %{ "divsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x5E);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct divD_imm(regD dst, immD con) %{
  match(Set dst (DivD dst con));
  format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ divsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Float sqrt: the ideal graph only has SqrtD, so a float sqrt appears
// as ConvD2F(SqrtD(ConvF2D x)); matching the whole subtree lets us
// emit a single SQRTSS (F3 0F 51).
instruct sqrtF_reg(regF dst, regF src)
%{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct sqrtF_mem(regF dst, memory src)
%{
  match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));

  format %{ "sqrtss $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF3, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct sqrtF_imm(regF dst, immF con) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
  format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ sqrtss($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Double sqrt: SQRTSD (F2 0F 51)
instruct sqrtD_reg(regD dst, regD src)
%{
  match(Set dst (SqrtD src));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow);
%}

instruct sqrtD_mem(regD dst, memory src)
%{
  match(Set dst (SqrtD (LoadD src)));

  format %{ "sqrtsd $dst, $src" %}
  ins_cost(150); // XXX
  opcode(0xF2, 0x0F, 0x51);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct sqrtD_imm(regD dst, immD con) %{
  match(Set dst (SqrtD con));
  format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
  ins_cost(150); // XXX
  ins_encode %{
    __ sqrtsd($dst$$XMMRegister, $constantaddress($con));
  %}
  ins_pipe(pipe_slow);
%}

// Absolute value by clearing the sign bit with an AND mask.
instruct absF_reg(regF dst)
%{
  match(Set dst (AbsF dst));

  format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
  ins_encode(absF_encoding(dst));
  ins_pipe(pipe_slow);
%}

instruct absD_reg(regD dst)
%{
  match(Set dst (AbsD dst));

  format %{ "andpd $dst, [0x7fffffffffffffff]\t"
            "# abs double by sign masking" %}
  ins_encode(absD_encoding(dst));
  ins_pipe(pipe_slow);
%}

// Negation by flipping the sign bit with an XOR mask.
instruct negF_reg(regF dst)
%{
  match(Set dst (NegF dst));

  format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
  ins_encode(negF_encoding(dst));
  ins_pipe(pipe_slow);
%}

instruct negD_reg(regD dst)
%{
  match(Set dst (NegD dst));

  format %{ "xorpd $dst, [0x8000000000000000]\t"
            "# neg double by sign flipping" %}
  ins_encode(negD_encoding(dst));
  ins_pipe(pipe_slow);
%}

// -----------Trig and Trancendental Instructions------------------------------
// These route through the x87 FPU: Push_SrcXD/Push_ResultXD move the
// value between the XMM argument/result and the x87 stack.

// cos via x87 FCOS (D9 FF)
instruct cosD_reg(regD dst) %{
  match(Set dst (CosD dst));

  format %{ "dcos $dst\n\t" %}
  opcode(0xD9, 0xFF);
  ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
  ins_pipe( pipe_slow );
%}

// sin via x87 FSIN (D9 FE)
instruct sinD_reg(regD dst) %{
  match(Set dst (SinD dst));

  format %{ "dsin $dst\n\t" %}
  opcode(0xD9, 0xFE);
  ins_encode( Push_SrcXD(dst), OpcP, OpcS, Push_ResultXD(dst) );
  ins_pipe( pipe_slow );
%}

// tan via x87 FPTAN; fptan pushes an extra 1.0 which is popped by fstp.
instruct tanD_reg(regD dst) %{
  match(Set dst (TanD dst));

  format %{ "dtan $dst\n\t" %}
  ins_encode( Push_SrcXD(dst),
              Opcode(0xD9), Opcode(0xF2),   //fptan
              Opcode(0xDD), Opcode(0xD8),   //fstp st
              Push_ResultXD(dst) );
  ins_pipe( pipe_slow );
%}

instruct log10D_reg(regD dst) %{
  // The source and result Double operands in XMM registers
  match(Set dst (Log10D dst));
  // fldlg2       ; push log_10(2) on the FPU stack; full 80-bit number
  // fyl2x        ; compute log_10(2) * log_2(x)
  format %{ "fldlg2\t\t\t#Log10\n\t"
            "fyl2x\t\t\t# Q=Log10*Log_2(x)\n\t"
         %}
  ins_encode(Opcode(0xD9), Opcode(0xEC),   // fldlg2
             Push_SrcXD(dst),
             Opcode(0xD9), Opcode(0xF1),   // fyl2x
             Push_ResultXD(dst));

  ins_pipe( pipe_slow );
%}

instruct logD_reg(regD dst) %{
  // The source and result Double operands in XMM registers
  match(Set dst (LogD dst));
  // fldln2       ; push log_e(2) on the FPU stack; full 80-bit number
  // fyl2x        ; compute log_e(2) * log_2(x)
  format %{ "fldln2\t\t\t#Log_e\n\t"
            "fyl2x\t\t\t# Q=Log_e*Log_2(x)\n\t"
         %}
  ins_encode( Opcode(0xD9), Opcode(0xED),   // fldln2
              Push_SrcXD(dst),
              Opcode(0xD9), Opcode(0xF1),   // fyl2x
              Push_ResultXD(dst));
  ins_pipe( pipe_slow );
%}



//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op on x86_64: SSE arithmetic already rounds to
// float precision, so the node matches to zero instructions.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

// RoundDouble is likewise a no-op (see roundFloat_nop).
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

// float -> double: CVTSS2SD (F3 0F 5A)
instruct convF2D_reg_reg(regD dst, regF src)
%{
  match(Set dst (ConvF2D src));

  format %{ "cvtss2sd $dst, $src" %}
  opcode(0xF3, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convF2D_reg_mem(regD dst, memory src)
%{
  match(Set dst (ConvF2D (LoadF src)));

  format %{ "cvtss2sd $dst, $src" %}
  opcode(0xF3, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// double -> float: CVTSD2SS (F2 0F 5A)
instruct convD2F_reg_reg(regF dst, regD src)
%{
  match(Set dst (ConvD2F src));

  format %{ "cvtsd2ss $dst, $src" %}
  opcode(0xF2, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convD2F_reg_mem(regF dst, memory src)
%{
  match(Set dst (ConvD2F (LoadD src)));

  format %{ "cvtsd2ss $dst, $src" %}
  opcode(0xF2, 0x0F, 0x5A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// XXX do mem variants
// float -> int. cvttss2si returns 0x80000000 for out-of-range/NaN
// inputs; that sentinel is detected and the slow-path f2i_fixup stub
// is called to produce the Java-specified result.
instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr)
%{
  match(Set dst (ConvF2I src));
  effect(KILL cr);  // fixup path compares and calls a stub

  format %{ "cvttss2sil $dst, $src\t# f2i\n\t"
            "cmpl $dst, #0x80000000\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movss [rsp], $src\n\t"
            "call f2i_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF3, 0x0F, 0x2C);  // CVTTSS2SI
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
             f2i_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

// float -> long, with the analogous 0x8000000000000000 sentinel fixup.
instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr)
%{
  match(Set dst (ConvF2L src));
  effect(KILL cr);

  format %{ "cvttss2siq $dst, $src\t# f2l\n\t"
            "cmpq $dst, [0x8000000000000000]\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movss [rsp], $src\n\t"
            "call f2l_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF3, 0x0F, 0x2C);  // CVTTSS2SI (REX.W for 64-bit result)
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
             f2l_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

// double -> int, with sentinel fixup (see convF2I_reg_reg).
instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr)
%{
  match(Set dst (ConvD2I src));
  effect(KILL cr);

  format %{ "cvttsd2sil $dst, $src\t# d2i\n\t"
            "cmpl $dst, #0x80000000\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movsd [rsp], $src\n\t"
            "call d2i_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF2, 0x0F, 0x2C);  // CVTTSD2SI
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src),
             d2i_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

// double -> long, with sentinel fixup (see convF2L_reg_reg).
instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr)
%{
  match(Set dst (ConvD2L src));
  effect(KILL cr);

  format %{ "cvttsd2siq $dst, $src\t# d2l\n\t"
            "cmpq $dst, [0x8000000000000000]\n\t"
            "jne,s done\n\t"
            "subq rsp, #8\n\t"
            "movsd [rsp], $src\n\t"
            "call d2l_fixup\n\t"
            "popq $dst\n"
            "done: "%}
  opcode(0xF2, 0x0F, 0x2C);  // CVTTSD2SI (REX.W for 64-bit result)
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src),
             d2l_fixup(dst, src));
  ins_pipe(pipe_slow);
%}

// int -> float via CVTSI2SS; only when the XMM-based variant
// (convXI2F_reg, predicate UseXmmI2F) is disabled.
instruct convI2F_reg_reg(regF dst, rRegI src)
%{
  predicate(!UseXmmI2F);
  match(Set dst (ConvI2F src));

  format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convI2F_reg_mem(regF dst, memory src)
%{
  match(Set dst (ConvI2F (LoadI src)));

  format %{ "cvtsi2ssl $dst, $src\t# i2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// int -> double via CVTSI2SD; only when UseXmmI2D is disabled.
instruct convI2D_reg_reg(regD dst, rRegI src)
%{
  predicate(!UseXmmI2D);
  match(Set dst (ConvI2D src));

  format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convI2D_reg_mem(regD dst, memory src)
%{
  match(Set dst (ConvI2D (LoadI src)));

  format %{ "cvtsi2sdl $dst, $src\t# i2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// int -> float using XMM-only ops (movd + cvtdq2ps), selected by
// UseXmmI2F; avoids the GPR->XMM cvtsi2ss form.
instruct convXI2F_reg(regF dst, rRegI src)
%{
  predicate(UseXmmI2F);
  match(Set dst (ConvI2F src));

  format %{ "movdl $dst, $src\n\t"
            "cvtdq2psl $dst, $dst\t# i2f" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// int -> double using XMM-only ops (movd + cvtdq2pd), selected by
// UseXmmI2D.
instruct convXI2D_reg(regD dst, rRegI src)
%{
  predicate(UseXmmI2D);
  match(Set dst (ConvI2D src));

  format %{ "movdl $dst, $src\n\t"
            "cvtdq2pdl $dst, $dst\t# i2d" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe(pipe_slow); // XXX
%}

// long -> float: CVTSI2SS with REX.W
instruct convL2F_reg_reg(regF dst, rRegL src)
%{
  match(Set dst (ConvL2F src));

  format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convL2F_reg_mem(regF dst, memory src)
%{
  match(Set dst (ConvL2F (LoadL src)));

  format %{ "cvtsi2ssq $dst, $src\t# l2f" %}
  opcode(0xF3, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// long -> double: CVTSI2SD with REX.W
instruct convL2D_reg_reg(regD dst, rRegL src)
%{
  match(Set dst (ConvL2D src));

  format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_reg_wide(dst, src), OpcS, OpcT, reg_reg(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

instruct convL2D_reg_mem(regD dst, memory src)
%{
  match(Set dst (ConvL2D (LoadL src)));

  format %{ "cvtsi2sdq $dst, $src\t# l2d" %}
  opcode(0xF2, 0x0F, 0x2A);
  ins_encode(OpcP, REX_reg_mem_wide(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow); // XXX
%}

// Sign-extend int to long: MOVSLQ
instruct convI2L_reg_reg(rRegL dst, rRegI src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(125);
  format %{ "movslq $dst, $src\t# i2l" %}
  ins_encode %{
    __ movslq($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// instruct convI2L_reg_reg_foo(rRegL dst, rRegI src)
// %{
//   match(Set dst (ConvI2L src));
// //   predicate(_kids[0]->_leaf->as_Type()->type()->is_int()->_lo >= 0 &&
// //             _kids[0]->_leaf->as_Type()->type()->is_int()->_hi >= 0);
//   predicate(((const TypeNode*) n)->type()->is_long()->_hi ==
//             (unsigned int) ((const TypeNode*) n)->type()->is_long()->_hi &&
//             ((const TypeNode*) n)->type()->is_long()->_lo ==
//             (unsigned int) ((const TypeNode*) n)->type()->is_long()->_lo);

//   format %{ "movl $dst, $src\t# unsigned i2l" %}
//   ins_encode(enc_copy(dst, src));
// //   opcode(0x63); // needs REX.W
// //   ins_encode(REX_reg_reg_wide(dst, src), OpcP, reg_reg(dst,src));
//   ins_pipe(ialu_reg_reg);
// %}

// Zero-extend convert int to long
// A 32-bit movl implicitly clears the upper 32 bits on x86_64, so the
// AndL with the 32-bit mask is free.
instruct convI2L_reg_reg_zex(rRegL dst, rRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
  ins_encode(enc_copy(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// Zero-extend convert int to long
instruct convI2L_reg_mem_zex(rRegL dst, memory src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend\n\t" %}
  opcode(0x8B);  // MOV r32, r/m32
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// Mask a long to its low 32 bits: again just a 32-bit register move.
instruct zerox_long_reg_reg(rRegL dst, rRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long" %}
  ins_encode(enc_copy_always(dst, src));
  ins_pipe(ialu_reg_reg);
%}

// long -> int: just the low 32 bits via a 32-bit move.
instruct convL2I_reg_reg(rRegI dst, rRegL src)
%{
  match(Set dst (ConvL2I src));

  format %{ "movl $dst, $src\t# l2i" %}
  ins_encode(enc_copy_always(dst, src));
  ins_pipe(ialu_reg_reg);
%}


// MoveF2I/MoveI2F/MoveD2L/MoveL2D are raw bit moves between the FP and
// integer register files, staged through a stack slot or moved directly.

instruct MoveF2I_stack_reg(rRegI dst, stackSlotF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movl $dst, $src\t# MoveF2I_stack_reg" %}
  opcode(0x8B);  // MOV r32, r/m32
  ins_encode(REX_reg_mem(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movss $dst, $src\t# MoveI2F_stack_reg" %}
  opcode(0xF3, 0x0F, 0x10);  // MOVSS
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

instruct MoveD2L_stack_reg(rRegL dst, stackSlotD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movq $dst, $src\t# MoveD2L_stack_reg" %}
  opcode(0x8B);  // MOV r64, r/m64 (REX.W)
  ins_encode(REX_reg_mem_wide(dst, src), OpcP, reg_mem(dst, src));
  ins_pipe(ialu_reg_mem);
%}

// movlpd loads only the low 64 bits, leaving the upper half of the XMM
// register unchanged; used when clearing the upper half is slow.
instruct MoveL2D_stack_reg_partial(regD dst, stackSlotL src) %{
  predicate(!UseXmmLoadAndClearUpper);
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movlpd $dst, $src\t# MoveL2D_stack_reg" %}
  opcode(0x66, 0x0F, 0x12);  // MOVLPD
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}

// movsd variant: loads the low 64 bits and zeroes the upper half.
instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
  predicate(UseXmmLoadAndClearUpper);
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);

  ins_cost(125);
  format %{ "movsd $dst, $src\t# MoveL2D_stack_reg" %}
  opcode(0xF2, 0x0F, 0x10);  // MOVSD
  ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, reg_mem(dst, src));
  ins_pipe(pipe_slow);
%}


instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);

  ins_cost(95); // XXX
  format %{ "movss $dst, $src\t# MoveF2I_reg_stack" %}
  opcode(0xF3, 0x0F, 0x11);  // MOVSS store form
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow);
%}

instruct MoveI2F_reg_stack(stackSlotF dst, rRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);

  ins_cost(100);
  format %{ "movl $dst, $src\t# MoveI2F_reg_stack" %}
  opcode(0x89);  // MOV r/m32, r32
  ins_encode(REX_reg_mem(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe( ialu_mem_reg );
%}

// NOTE(review): the format string says "MoveL2D_reg_stack" but this is
// the MoveD2L direction — cosmetic listing text only.
instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);

  ins_cost(95); // XXX
  format %{ "movsd $dst, $src\t# MoveL2D_reg_stack" %}
  opcode(0xF2, 0x0F, 0x11);  // MOVSD store form
  ins_encode(OpcP, REX_reg_mem(src, dst), OpcS, OpcT, reg_mem(src, dst));
  ins_pipe(pipe_slow);
%}

instruct MoveL2D_reg_stack(stackSlotD dst, rRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);

  ins_cost(100);
  format %{ "movq $dst, $src\t# MoveL2D_reg_stack" %}
  opcode(0x89);  // MOV r/m64, r64 (REX.W)
  ins_encode(REX_reg_mem_wide(src, dst), OpcP, reg_mem(src, dst));
  ins_pipe(ialu_mem_reg);
%}

// Direct GPR<->XMM bit move via movd (no stack staging).
instruct MoveF2I_reg_reg(rRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "movd $dst,$src\t# MoveF2I" %}
  ins_encode %{ __ movdl($dst$$Register, $src$$XMMRegister);
%} 11326 ins_pipe( pipe_slow ); 11327 %} 11328 11329 instruct MoveD2L_reg_reg(rRegL dst, regD src) %{ 11330 match(Set dst (MoveD2L src)); 11331 effect(DEF dst, USE src); 11332 ins_cost(85); 11333 format %{ "movd $dst,$src\t# MoveD2L" %} 11334 ins_encode %{ __ movdq($dst$$Register, $src$$XMMRegister); %} 11335 ins_pipe( pipe_slow ); 11336 %} 11337 11338 // The next instructions have long latency and use Int unit. Set high cost. 11339 instruct MoveI2F_reg_reg(regF dst, rRegI src) %{ 11340 match(Set dst (MoveI2F src)); 11341 effect(DEF dst, USE src); 11342 ins_cost(300); 11343 format %{ "movd $dst,$src\t# MoveI2F" %} 11344 ins_encode %{ __ movdl($dst$$XMMRegister, $src$$Register); %} 11345 ins_pipe( pipe_slow ); 11346 %} 11347 11348 instruct MoveL2D_reg_reg(regD dst, rRegL src) %{ 11349 match(Set dst (MoveL2D src)); 11350 effect(DEF dst, USE src); 11351 ins_cost(300); 11352 format %{ "movd $dst,$src\t# MoveL2D" %} 11353 ins_encode %{ __ movdq($dst$$XMMRegister, $src$$Register); %} 11354 ins_pipe( pipe_slow ); 11355 %} 11356 11357 // Replicate scalar to packed byte (1 byte) values in xmm 11358 instruct Repl8B_reg(regD dst, regD src) %{ 11359 match(Set dst (Replicate8B src)); 11360 format %{ "MOVDQA $dst,$src\n\t" 11361 "PUNPCKLBW $dst,$dst\n\t" 11362 "PSHUFLW $dst,$dst,0x00\t! replicate8B" %} 11363 ins_encode( pshufd_8x8(dst, src)); 11364 ins_pipe( pipe_slow ); 11365 %} 11366 11367 // Replicate scalar to packed byte (1 byte) values in xmm 11368 instruct Repl8B_rRegI(regD dst, rRegI src) %{ 11369 match(Set dst (Replicate8B src)); 11370 format %{ "MOVD $dst,$src\n\t" 11371 "PUNPCKLBW $dst,$dst\n\t" 11372 "PSHUFLW $dst,$dst,0x00\t! replicate8B" %} 11373 ins_encode( mov_i2x(dst, src), pshufd_8x8(dst, dst)); 11374 ins_pipe( pipe_slow ); 11375 %} 11376 11377 // Replicate scalar zero to packed byte (1 byte) values in xmm 11378 instruct Repl8B_immI0(regD dst, immI0 zero) %{ 11379 match(Set dst (Replicate8B zero)); 11380 format %{ "PXOR $dst,$dst\t! 
replicate8B" %} 11381 ins_encode( pxor(dst, dst)); 11382 ins_pipe( fpu_reg_reg ); 11383 %} 11384 11385 // Replicate scalar to packed shore (2 byte) values in xmm 11386 instruct Repl4S_reg(regD dst, regD src) %{ 11387 match(Set dst (Replicate4S src)); 11388 format %{ "PSHUFLW $dst,$src,0x00\t! replicate4S" %} 11389 ins_encode( pshufd_4x16(dst, src)); 11390 ins_pipe( fpu_reg_reg ); 11391 %} 11392 11393 // Replicate scalar to packed shore (2 byte) values in xmm 11394 instruct Repl4S_rRegI(regD dst, rRegI src) %{ 11395 match(Set dst (Replicate4S src)); 11396 format %{ "MOVD $dst,$src\n\t" 11397 "PSHUFLW $dst,$dst,0x00\t! replicate4S" %} 11398 ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst)); 11399 ins_pipe( fpu_reg_reg ); 11400 %} 11401 11402 // Replicate scalar zero to packed short (2 byte) values in xmm 11403 instruct Repl4S_immI0(regD dst, immI0 zero) %{ 11404 match(Set dst (Replicate4S zero)); 11405 format %{ "PXOR $dst,$dst\t! replicate4S" %} 11406 ins_encode( pxor(dst, dst)); 11407 ins_pipe( fpu_reg_reg ); 11408 %} 11409 11410 // Replicate scalar to packed char (2 byte) values in xmm 11411 instruct Repl4C_reg(regD dst, regD src) %{ 11412 match(Set dst (Replicate4C src)); 11413 format %{ "PSHUFLW $dst,$src,0x00\t! replicate4C" %} 11414 ins_encode( pshufd_4x16(dst, src)); 11415 ins_pipe( fpu_reg_reg ); 11416 %} 11417 11418 // Replicate scalar to packed char (2 byte) values in xmm 11419 instruct Repl4C_rRegI(regD dst, rRegI src) %{ 11420 match(Set dst (Replicate4C src)); 11421 format %{ "MOVD $dst,$src\n\t" 11422 "PSHUFLW $dst,$dst,0x00\t! replicate4C" %} 11423 ins_encode( mov_i2x(dst, src), pshufd_4x16(dst, dst)); 11424 ins_pipe( fpu_reg_reg ); 11425 %} 11426 11427 // Replicate scalar zero to packed char (2 byte) values in xmm 11428 instruct Repl4C_immI0(regD dst, immI0 zero) %{ 11429 match(Set dst (Replicate4C zero)); 11430 format %{ "PXOR $dst,$dst\t! 
replicate4C" %} 11431 ins_encode( pxor(dst, dst)); 11432 ins_pipe( fpu_reg_reg ); 11433 %} 11434 11435 // Replicate scalar to packed integer (4 byte) values in xmm 11436 instruct Repl2I_reg(regD dst, regD src) %{ 11437 match(Set dst (Replicate2I src)); 11438 format %{ "PSHUFD $dst,$src,0x00\t! replicate2I" %} 11439 ins_encode( pshufd(dst, src, 0x00)); 11440 ins_pipe( fpu_reg_reg ); 11441 %} 11442 11443 // Replicate scalar to packed integer (4 byte) values in xmm 11444 instruct Repl2I_rRegI(regD dst, rRegI src) %{ 11445 match(Set dst (Replicate2I src)); 11446 format %{ "MOVD $dst,$src\n\t" 11447 "PSHUFD $dst,$dst,0x00\t! replicate2I" %} 11448 ins_encode( mov_i2x(dst, src), pshufd(dst, dst, 0x00)); 11449 ins_pipe( fpu_reg_reg ); 11450 %} 11451 11452 // Replicate scalar zero to packed integer (2 byte) values in xmm 11453 instruct Repl2I_immI0(regD dst, immI0 zero) %{ 11454 match(Set dst (Replicate2I zero)); 11455 format %{ "PXOR $dst,$dst\t! replicate2I" %} 11456 ins_encode( pxor(dst, dst)); 11457 ins_pipe( fpu_reg_reg ); 11458 %} 11459 11460 // Replicate scalar to packed single precision floating point values in xmm 11461 instruct Repl2F_reg(regD dst, regD src) %{ 11462 match(Set dst (Replicate2F src)); 11463 format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %} 11464 ins_encode( pshufd(dst, src, 0xe0)); 11465 ins_pipe( fpu_reg_reg ); 11466 %} 11467 11468 // Replicate scalar to packed single precision floating point values in xmm 11469 instruct Repl2F_regF(regD dst, regF src) %{ 11470 match(Set dst (Replicate2F src)); 11471 format %{ "PSHUFD $dst,$src,0xe0\t! replicate2F" %} 11472 ins_encode( pshufd(dst, src, 0xe0)); 11473 ins_pipe( fpu_reg_reg ); 11474 %} 11475 11476 // Replicate scalar to packed single precision floating point values in xmm 11477 instruct Repl2F_immF0(regD dst, immF0 zero) %{ 11478 match(Set dst (Replicate2F zero)); 11479 format %{ "PXOR $dst,$dst\t! 
replicate2F" %} 11480 ins_encode( pxor(dst, dst)); 11481 ins_pipe( fpu_reg_reg ); 11482 %} 11483 11484 11485 // ======================================================================= 11486 // fast clearing of an array 11487 instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy, 11488 rFlagsReg cr) 11489 %{ 11490 match(Set dummy (ClearArray cnt base)); 11491 effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr); 11492 11493 format %{ "xorl rax, rax\t# ClearArray:\n\t" 11494 "rep stosq\t# Store rax to *rdi++ while rcx--" %} 11495 ins_encode(opc_reg_reg(0x33, RAX, RAX), // xorl %eax, %eax 11496 Opcode(0xF3), Opcode(0x48), Opcode(0xAB)); // rep REX_W stos 11497 ins_pipe(pipe_slow); 11498 %} 11499 11500 instruct string_compare(rdi_RegP str1, rcx_RegI cnt1, rsi_RegP str2, rdx_RegI cnt2, 11501 rax_RegI result, regD tmp1, rFlagsReg cr) 11502 %{ 11503 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); 11504 effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); 11505 11506 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1" %} 11507 ins_encode %{ 11508 __ string_compare($str1$$Register, $str2$$Register, 11509 $cnt1$$Register, $cnt2$$Register, $result$$Register, 11510 $tmp1$$XMMRegister); 11511 %} 11512 ins_pipe( pipe_slow ); 11513 %} 11514 11515 // fast search of substring with known size. 
instruct string_indexof_con(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2,
                            rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr)
%{
  predicate(UseSSE42Intrinsics);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %}
  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    if (icnt2 >= 8) {
      // IndexOf for constant substrings with size >= 8 elements
      // which don't need to be loaded through stack.
      __ string_indexofC8($str1$$Register, $str2$$Register,
                          $cnt1$$Register, $cnt2$$Register,
                          icnt2, $result$$Register,
                          $vec$$XMMRegister, $tmp$$Register);
    } else {
      // Small strings are loaded through stack if they cross page boundary.
      __ string_indexof($str1$$Register, $str2$$Register,
                        $cnt1$$Register, $cnt2$$Register,
                        icnt2, $result$$Register,
                        $vec$$XMMRegister, $tmp$$Register);
    }
  %}
  ins_pipe( pipe_slow );
%}

// General (non-constant substring length) SSE4.2 StrIndexOf; the -1
// length argument tells the stub the count is in a register.
instruct string_indexof(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2,
                        rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr)
%{
  predicate(UseSSE42Intrinsics);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr);

  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %}
  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      (-1), $result$$Register,
                      $vec$$XMMRegister, $tmp$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// fast string equals
instruct string_equals(rdi_RegP str1, rsi_RegP str2, rcx_RegI cnt, rax_RegI result,
                       regD tmp1, regD tmp2, rbx_RegI tmp3, rFlagsReg cr)
%{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(TEMP tmp1, TEMP tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp3, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp1, $tmp2, $tmp3" %}
  ins_encode %{
    // first argument false => compare char arrays of equal known length
    __ char_arrays_equals(false, $str1$$Register, $str2$$Register,
                          $cnt$$Register, $result$$Register, $tmp3$$Register,
                          $tmp1$$XMMRegister, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// fast array equals
instruct array_equals(rdi_RegP ary1, rsi_RegP ary2, rax_RegI result,
                      regD tmp1, regD tmp2, rcx_RegI tmp3, rbx_RegI tmp4, rFlagsReg cr)
%{
  match(Set result (AryEq ary1 ary2));
  effect(TEMP tmp1, TEMP tmp2, USE_KILL ary1, USE_KILL ary2, KILL tmp3, KILL tmp4, KILL cr);
  //ins_cost(300);

  format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1, $tmp2, $tmp3, $tmp4" %}
  ins_encode %{
    // first argument true => full array comparison (lengths checked by stub)
    __ char_arrays_equals(true, $ary1$$Register, $ary2$$Register,
                          $tmp3$$Register, $result$$Register, $tmp4$$Register,
                          $tmp1$$XMMRegister, $tmp2$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

//----------Control Flow Instructions------------------------------------------
// Signed compare Instructions

// XXX more variants!!
instruct compI_rReg(rFlagsReg cr, rRegI op1, rRegI op2)
%{
  match(Set cr (CmpI op1 op2));
  effect(DEF cr, USE op1, USE op2);

  format %{ "cmpl $op1, $op2" %}
  opcode(0x3B);  /* Opcode 3B /r */
  ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

// Compare int register against immediate.
instruct compI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  format %{ "cmpl $op1, $op2" %}
  opcode(0x81, 0x07); /* Opcode 81 /7 */
  ins_encode(OpcSErm(op1, op2), Con8or32(op2));
  ins_pipe(ialu_cr_reg_imm);
%}

// Compare int register against a value loaded from memory.
instruct compI_rReg_mem(rFlagsReg cr, rRegI op1, memory op2)
%{
  match(Set cr (CmpI op1 (LoadI op2)));

  ins_cost(500); // XXX
  format %{ "cmpl $op1, $op2" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// Compare against zero: use testl reg,reg (shorter than cmpl reg,0).
instruct testI_reg(rFlagsReg cr, rRegI src, immI0 zero)
%{
  match(Set cr (CmpI src zero));

  format %{ "testl $src, $src" %}
  opcode(0x85);
  ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}

// (src & con) compared with zero folds to a single testl.
instruct testI_reg_imm(rFlagsReg cr, rRegI src, immI con, immI0 zero)
%{
  match(Set cr (CmpI (AndI src con) zero));

  format %{ "testl $src, $con" %}
  opcode(0xF7, 0x00);
  ins_encode(REX_reg(src), OpcP, reg_opc(src), Con32(con));
  ins_pipe(ialu_cr_reg_imm);
%}

// (src & mem) compared with zero folds to testl reg, mem.
instruct testI_reg_mem(rFlagsReg cr, rRegI src, memory mem, immI0 zero)
%{
  match(Set cr (CmpI (AndI src (LoadI mem)) zero));

  format %{ "testl $src, $mem" %}
  opcode(0x85);
  ins_encode(REX_reg_mem(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_cr_reg_mem);
%}

// Unsigned compare Instructions; really, same as signed except they
// produce an rFlagsRegU instead of rFlagsReg.
instruct compU_rReg(rFlagsRegU cr, rRegI op1, rRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  format %{ "cmpl $op1, $op2\t# unsigned" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_reg(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

instruct compU_rReg_imm(rFlagsRegU cr, rRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  format %{ "cmpl $op1, $op2\t# unsigned" %}
  opcode(0x81,0x07); /* Opcode 81 /7 */
  ins_encode(OpcSErm(op1, op2), Con8or32(op2));
  ins_pipe(ialu_cr_reg_imm);
%}

instruct compU_rReg_mem(rFlagsRegU cr, rRegI op1, memory op2)
%{
  match(Set cr (CmpU op1 (LoadI op2)));

  ins_cost(500); // XXX
  format %{ "cmpl $op1, $op2\t# unsigned" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// // // Cisc-spilled version of cmpU_rReg
// //instruct compU_mem_rReg(rFlagsRegU cr, memory op1, rRegI op2)
// //%{
// //  match(Set cr (CmpU (LoadI op1) op2));
// //
// //  format %{ "CMPu $op1,$op2" %}
// //  ins_cost(500);
// //  opcode(0x39); /* Opcode 39 /r */
// //  ins_encode( OpcP, reg_mem( op1, op2) );
// //%}

instruct testU_reg(rFlagsRegU cr, rRegI src, immI0 zero)
%{
  match(Set cr (CmpU src zero));

  format %{ "testl $src, $src\t# unsigned" %}
  opcode(0x85);
  ins_encode(REX_reg_reg(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}

// Pointer compare: 64-bit cmpq; pointer compares are unsigned.
instruct compP_rReg(rFlagsRegU cr, rRegP op1, rRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  format %{ "cmpq $op1, $op2\t# ptr" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

instruct compP_rReg_mem(rFlagsRegU cr, rRegP op1, memory op2)
%{
  match(Set cr (CmpP op1 (LoadP op2)));

  ins_cost(500); // XXX
  format %{ "cmpq $op1, $op2\t# ptr" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// // // Cisc-spilled version of cmpP_rReg
// //instruct compP_mem_rReg(rFlagsRegU cr, memory op1, rRegP op2)
// //%{
// //  match(Set cr (CmpP (LoadP op1) op2));
// //
// //  format %{ "CMPu $op1,$op2" %}
// //  ins_cost(500);
// //  opcode(0x39); /* Opcode 39 /r */
// //  ins_encode( OpcP, reg_mem( op1, op2) );
// //%}

// XXX this is generalized by compP_rReg_mem???
// Compare raw pointer (used in out-of-heap check).
// Only works because non-oop pointers must be raw pointers
// and raw pointers have no anti-dependencies.
instruct compP_mem_rReg(rFlagsRegU cr, rRegP op1, memory op2)
%{
  predicate(!n->in(2)->in(2)->bottom_type()->isa_oop_ptr());
  match(Set cr (CmpP op1 (LoadP op2)));

  format %{ "cmpq $op1, $op2\t# raw ptr" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// This will generate a signed flags result. This should be OK since
// any compare to a zero should be eq/neq.
instruct testP_reg(rFlagsReg cr, rRegP src, immP0 zero)
%{
  match(Set cr (CmpP src zero));

  format %{ "testq $src, $src\t# ptr" %}
  opcode(0x85);
  ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}

// This will generate a signed flags result. This should be OK since
// any compare to a zero should be eq/neq.
instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)
%{
  predicate(!UseCompressedOops || (Universe::narrow_oop_base() != NULL));
  match(Set cr (CmpP (LoadP op) zero));

  ins_cost(500); // XXX
  format %{ "testq $op, 0xffffffffffffffff\t# ptr" %}
  opcode(0xF7); /* Opcode F7 /0 */
  ins_encode(REX_mem_wide(op),
             OpcP, RM_opc_mem(0x00, op), Con_d32(0xFFFFFFFF));
  ins_pipe(ialu_cr_reg_imm);
%}

// Null-check a pointer in memory when the heap base is zero:
// R12 holds 0 in that configuration, so compare against it.
instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
%{
  predicate(UseCompressedOops && (Universe::narrow_oop_base() == NULL));
  match(Set cr (CmpP (LoadP mem) zero));

  format %{ "cmpq R12, $mem\t# ptr (R12_heapbase==0)" %}
  ins_encode %{
    __ cmpq(r12, $mem$$Address);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}

// Compressed-oop (narrow pointer) compares: 32-bit operations.
instruct compN_rReg(rFlagsRegU cr, rRegN op1, rRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  format %{ "cmpl $op1, $op2\t# compressed ptr" %}
  ins_encode %{ __ cmpl($op1$$Register, $op2$$Register); %}
  ins_pipe(ialu_cr_reg_reg);
%}

instruct compN_rReg_mem(rFlagsRegU cr, rRegN src, memory mem)
%{
  match(Set cr (CmpN src (LoadN mem)));

  format %{ "cmpl $src, $mem\t# compressed ptr" %}
  ins_encode %{
    __ cmpl($src$$Register, $mem$$Address);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}

instruct compN_rReg_imm(rFlagsRegU cr, rRegN op1, immN op2) %{
  match(Set cr (CmpN op1 op2));

  format %{ "cmpl $op1, $op2\t# compressed ptr" %}
  ins_encode %{
    __ cmp_narrow_oop($op1$$Register, (jobject)$op2$$constant);
  %}
  ins_pipe(ialu_cr_reg_imm);
%}

instruct compN_mem_imm(rFlagsRegU cr, memory mem, immN src)
%{
  match(Set cr (CmpN src (LoadN mem)));

  format %{ "cmpl $mem, $src\t# compressed ptr" %}
  ins_encode %{
    __ cmp_narrow_oop($mem$$Address, (jobject)$src$$constant);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}

instruct testN_reg(rFlagsReg cr, rRegN src, immN0 zero) %{
  match(Set cr (CmpN src zero));

  format %{ "testl $src, $src\t# compressed ptr" %}
  ins_encode %{ __ testl($src$$Register, $src$$Register); %}
  ins_pipe(ialu_cr_reg_imm);
%}

instruct testN_mem(rFlagsReg cr, memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() != NULL);
  match(Set cr (CmpN (LoadN mem) zero));

  ins_cost(500); // XXX
  format %{ "testl $mem, 0xffffffff\t# compressed ptr" %}
  ins_encode %{
    __ cmpl($mem$$Address, (int)0xFFFFFFFF);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}

// When the heap base is zero, R12 is zero — compare against it directly.
instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL);
  match(Set cr (CmpN (LoadN mem) zero));

  format %{ "cmpl R12, $mem\t# compressed ptr (R12_heapbase==0)" %}
  ins_encode %{
    __ cmpl(r12, $mem$$Address);
  %}
  ins_pipe(ialu_cr_reg_mem);
%}

// Yanked all unsigned pointer compare operations.
// Pointer compares are done with CmpP which is already unsigned.

// Long (64-bit) compare instructions.
instruct compL_rReg(rFlagsReg cr, rRegL op1, rRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  format %{ "cmpq $op1, $op2" %}
  opcode(0x3B);  /* Opcode 3B /r */
  ins_encode(REX_reg_reg_wide(op1, op2), OpcP, reg_reg(op1, op2));
  ins_pipe(ialu_cr_reg_reg);
%}

// Compare long register against a 32-bit sign-extended immediate.
instruct compL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2)
%{
  match(Set cr (CmpL op1 op2));

  format %{ "cmpq $op1, $op2" %}
  opcode(0x81, 0x07); /* Opcode 81 /7 */
  ins_encode(OpcSErm_wide(op1, op2), Con8or32(op2));
  ins_pipe(ialu_cr_reg_imm);
%}

instruct compL_rReg_mem(rFlagsReg cr, rRegL op1, memory op2)
%{
  match(Set cr (CmpL op1 (LoadL op2)));

  format %{ "cmpq $op1, $op2" %}
  opcode(0x3B); /* Opcode 3B /r */
  ins_encode(REX_reg_mem_wide(op1, op2), OpcP, reg_mem(op1, op2));
  ins_pipe(ialu_cr_reg_mem);
%}

// Compare long against zero via testq reg,reg.
instruct testL_reg(rFlagsReg cr, rRegL src, immL0 zero)
%{
  match(Set cr (CmpL src zero));

  format %{ "testq $src, $src" %}
  opcode(0x85);
  ins_encode(REX_reg_reg_wide(src, src), OpcP, reg_reg(src, src));
  ins_pipe(ialu_cr_reg_imm);
%}

// (src & con) compared with zero folds to a single testq.
instruct testL_reg_imm(rFlagsReg cr, rRegL src, immL32 con, immL0 zero)
%{
  match(Set cr (CmpL (AndL src con) zero));

  format %{ "testq $src, $con\t# long" %}
  opcode(0xF7, 0x00);
  ins_encode(REX_reg_wide(src), OpcP, reg_opc(src), Con32(con));
  ins_pipe(ialu_cr_reg_imm);
%}

instruct testL_reg_mem(rFlagsReg cr, rRegL src, memory mem, immL0 zero)
%{
  match(Set cr (CmpL (AndL src (LoadL mem)) zero));

  format %{ "testq $src, $mem" %}
  opcode(0x85);
  ins_encode(REX_reg_mem_wide(src, mem), OpcP, reg_mem(src, mem));
  ins_pipe(ialu_cr_reg_mem);
%}

// Manifest a CmpL result in an integer register.  Very painful.
// This is the test to avoid.
instruct cmpL3_reg_reg(rRegI dst, rRegL src1, rRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(275); // XXX
  format %{ "cmpq $src1, $src2\t# CmpL3\n\t"
            "movl $dst, -1\n\t"
            "jl,s done\n\t"
            "setne $dst\n\t"
            "movzbl $dst, $dst\n\t"
            "done:" %}
  ins_encode(cmpl3_flag(src1, src2, dst));
  ins_pipe(pipe_slow);
%}

//----------Max and Min--------------------------------------------------------
// Min Instructions

// Conditional move used by the minI_rReg expansion below (no match rule
// of its own; only reachable through expand).
instruct cmovI_reg_g(rRegI dst, rRegI src, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE src, USE cr);

  format %{ "cmovlgt $dst, $src\t# min" %}
  opcode(0x0F, 0x4F);
  ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}


// MinI expands into a compare plus a cmovg (take src when dst > src).
instruct minI_rReg(rRegI dst, rRegI src)
%{
  match(Set dst (MinI dst src));

  ins_cost(200);
  expand %{
    rFlagsReg cr;
    compI_rReg(cr, dst, src);
    cmovI_reg_g(dst, src, cr);
  %}
%}

// Conditional move used by the maxI_rReg expansion below.
instruct cmovI_reg_l(rRegI dst, rRegI src, rFlagsReg cr)
%{
  effect(USE_DEF dst, USE src, USE cr);

  format %{ "cmovllt $dst, $src\t# max" %}
  opcode(0x0F, 0x4C);
  ins_encode(REX_reg_reg(dst, src), OpcP, OpcS, reg_reg(dst, src));
  ins_pipe(pipe_cmov_reg);
%}


// MaxI expands into a compare plus a cmovl (take src when dst < src).
instruct maxI_rReg(rRegI dst, rRegI src)
%{
  match(Set dst (MaxI dst src));

  ins_cost(200);
  expand %{
    rFlagsReg cr;
    compI_rReg(cr, dst, src);
    cmovI_reg_l(dst, src, cr);
  %}
%}

// ============================================================================
// Branch Instructions

// Jump Direct - Label defines a relative address from JMP+1
instruct jmpDir(label labl)
%{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "jmp $labl" %}
  size(5);
  opcode(0xE9);
  ins_encode(OpcP, Lbl(labl));
  ins_pipe(pipe_jmp);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpCon(cmpOp cop, rFlagsReg cr, label labl)
%{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop $labl" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEnd(cmpOp cop, rFlagsReg cr, label labl)
%{
  match(CountedLoopEnd cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop $labl\t# loop end" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEndU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,u $labl\t# loop end" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
%}

instruct jmpLoopEndUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(200);
  format %{ "j$cop,u $labl\t# loop end" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
%}

// Jump Direct Conditional - using unsigned comparison
instruct jmpConU(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,u $labl" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
%}

instruct jmpConUCF(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(200);
  format %{ "j$cop,u $labl" %}
  size(6);
  opcode(0x0F, 0x80);
  ins_encode(Jcc(cop, labl));
  ins_pipe(pipe_jcc);
%}

// Conditional jump on float-compare flags where the parity (unordered)
// case must be handled explicitly with an extra jp.
instruct jmpConUCF2(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(200);
  format %{ $$template
    if ($cop$$cmpcode == Assembler::notEqual) {
      $$emit$$"jp,u $labl\n\t"
      $$emit$$"j$cop,u $labl"
    } else {
      $$emit$$"jp,u done\n\t"
      $$emit$$"j$cop,u $labl\n\t"
      $$emit$$"done:"
    }
  %}
  size(12);
  opcode(0x0F, 0x80);
  ins_encode %{
    Label* l = $labl$$label;
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, Assembler::parity);
    int parity_disp = -1;
    if ($cop$$cmpcode == Assembler::notEqual) {
      // jp and jcc both target the label; each jump is 6 bytes,
      // so both displacements are computed against the label.
      parity_disp = l->loc_pos() - (cbuf.insts_size() + 4);
    } else if ($cop$$cmpcode == Assembler::equal) {
      // jp skips over the following 6-byte jcc to the "done" point.
      parity_disp = 6;
    } else {
      ShouldNotReachHere();
    }
    emit_d32(cbuf, parity_disp);
    $$$emit8$primary;
    emit_cc(cbuf, $secondary, $cop$$cmpcode);
    int disp = l->loc_pos() - (cbuf.insts_size() + 4);
    emit_d32(cbuf, disp);
  %}
  ins_pipe(pipe_jcc);
%}

// ============================================================================
// The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary
// superklass array for an instance of the superklass.  Set a hidden
// internal cache on a hit (cache is checked with exposed code in
// gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
// encoding ALSO sets flags.
// Partial subtype check: scan the secondary-supers array of $sub for $super.
// Kills RCX (scan counter) and the flags; result (RDI) is zero on a hit.
// Fix: the format comments previously spelled "arrayOopDex" — corrected to
// arrayOopDesc, consistent with the length_offset_in_bytes() line.
instruct partialSubtypeCheck(rdi_RegP result,
                             rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
                             rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL rcx, KILL cr);

  ins_cost(1100); // slightly larger than the next version
  format %{ "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
            "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
            "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
            "repne scasq\t# Scan *rdi++ for a match with rax while rcx--\n\t"
            "jne,s miss\t\t# Missed: rdi not-zero\n\t"
            "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
            "xorq $result, $result\t\t Hit: rdi zero\n\t"
            "miss:\t" %}

  opcode(0x1); // Force a XOR of RDI
  ins_encode(enc_PartialSubtypeCheck());
  ins_pipe(pipe_slow);
%}

// Same check, but matched directly against a compare with zero, so only the
// flags are needed; RDI is clobbered rather than returned as a result.
instruct partialSubtypeCheck_vs_Zero(rFlagsReg cr,
                                     rsi_RegP sub, rax_RegP super, rcx_RegI rcx,
                                     immP0 zero,
                                     rdi_RegP result)
%{
  match(Set cr (CmpP (PartialSubtypeCheck sub super) zero));
  effect(KILL rcx, KILL result);

  ins_cost(1000);
  format %{ "movq rdi, [$sub + (sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes())]\n\t"
            "movl rcx, [rdi + arrayOopDesc::length_offset_in_bytes()]\t# length to scan\n\t"
            "addq rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT)\t# Skip to start of data; set NZ in case count is zero\n\t"
            "repne scasq\t# Scan *rdi++ for a match with rax while cx-- != 0\n\t"
            "jne,s miss\t\t# Missed: flags nz\n\t"
            "movq [$sub + (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())], $super\t# Hit: update cache\n\t"
            "miss:\t" %}

  opcode(0x0); // No need to XOR RDI
  ins_encode(enc_PartialSubtypeCheck());
  ins_pipe(pipe_slow);
%}

// ============================================================================
// Branch Instructions -- short offset versions
//
// These instructions are used to replace jumps of a long offset (the default
// match) with jumps of a shorter offset.  These instructions are all tagged
// with the ins_short_branch attribute, which causes the ADLC to suppress the
// match rules in general matching.  Instead, the ADLC generates a conversion
// method in the MachNode which can be used to do in-place replacement of the
// long variant with the shorter variant.  The compiler will determine if a
// branch can be taken by the is_short_branch_offset() predicate in the machine
// specific code section of the file.

// Jump Direct - Label defines a relative address from JMP+1
instruct jmpDir_short(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "jmp,s $labl" %}
  size(2);
  opcode(0xEB);
  ins_encode(OpcP, LblShort(labl));
  ins_pipe(pipe_jmp);
  ins_short_branch(1);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpCon_short(cmpOp cop, rFlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,s $labl" %}
  size(2);
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_short_branch(1);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEnd_short(cmpOp cop, rFlagsReg cr, label labl) %{
  match(CountedLoopEnd cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,s $labl\t# loop end" %}
  size(2);
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_short_branch(1);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEndU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,us $labl\t# loop end" %}
  size(2);
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_short_branch(1);
%}

instruct jmpLoopEndUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  match(CountedLoopEnd cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,us $labl\t# loop end" %}
  size(2);
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_short_branch(1);
%}

// Jump Direct Conditional - using unsigned comparison
instruct jmpConU_short(cmpOpU cop, rFlagsRegU cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,us $labl" %}
  size(2);
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_short_branch(1);
%}

instruct jmpConUCF_short(cmpOpUCF cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ "j$cop,us $labl" %}
  size(2);
  opcode(0x70);
  ins_encode(JccShort(cop, labl));
  ins_pipe(pipe_jcc);
  ins_short_branch(1);
%}

// Short-offset variant of jmpConUCF2: emits two 2-byte short jumps ("jp" plus
// the conditional jump); for equal the "jp" skips the second jump (disp 2).
instruct jmpConUCF2_short(cmpOpUCF2 cop, rFlagsRegUCF cmp, label labl) %{
  match(If cop cmp);
  effect(USE labl);

  ins_cost(300);
  format %{ $$template
    if ($cop$$cmpcode == Assembler::notEqual) {
      $$emit$$"jp,u,s $labl\n\t"
      $$emit$$"j$cop,u,s $labl"
    } else {
      $$emit$$"jp,u,s done\n\t"
      $$emit$$"j$cop,u,s $labl\n\t"
      $$emit$$"done:"
    }
  %}
  size(4);
  opcode(0x70);
  ins_encode %{
    Label* l = $labl$$label;
    emit_cc(cbuf, $primary, Assembler::parity);
    int parity_disp = -1;
    if ($cop$$cmpcode == Assembler::notEqual) {
      // rel8 is relative to the end of this instruction, hence +1.
      parity_disp = l->loc_pos() - (cbuf.insts_size() + 1);
    } else if ($cop$$cmpcode == Assembler::equal) {
      // Skip over the second 2-byte short jump that follows.
      parity_disp = 2;
    } else {
      ShouldNotReachHere();
    }
    emit_d8(cbuf, parity_disp);
    emit_cc(cbuf, $primary, $cop$$cmpcode);
    int disp = l->loc_pos() - (cbuf.insts_size() + 1);
    emit_d8(cbuf, disp);
    assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp");
    assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
  %}
  ins_pipe(pipe_jcc);
  ins_short_branch(1);
%}

// ============================================================================
// inlined locking and unlocking

instruct cmpFastLock(rFlagsReg cr,
                     rRegP object, rRegP box, rax_RegI tmp, rRegP scr)
%{
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP scr);

  ins_cost(300);
  format %{ "fastlock $object,$box,$tmp,$scr" %}
  ins_encode(Fast_Lock(object, box, tmp, scr));
  ins_pipe(pipe_slow);
%}

instruct cmpFastUnlock(rFlagsReg cr,
                       rRegP object, rax_RegP box, rRegP tmp)
%{
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp);

  ins_cost(300);
  format %{ "fastunlock $object, $box, $tmp" %}
  ins_encode(Fast_Unlock(object, box, tmp));
  ins_pipe(pipe_slow);
%}


// ============================================================================
// Safepoint Instructions

// Poll the safepoint page via a RIP-relative test (page within rel32 range).
instruct safePoint_poll(rFlagsReg cr)
%{
  predicate(!Assembler::is_polling_page_far());
  match(SafePoint);
  effect(KILL cr);

  format %{ "testl rax, [rip + #offset_to_poll_page]\t"
            "# Safepoint: poll for GC" %}
  ins_cost(125);
  ins_encode %{
    AddressLiteral addr(os::get_polling_page(), relocInfo::poll_type);
    __ testl(rax, addr);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Far variant: the polling-page address is materialized in $poll first.
instruct safePoint_poll_far(rFlagsReg cr, rRegP poll)
%{
  predicate(Assembler::is_polling_page_far());
  match(SafePoint poll);
  effect(KILL cr, USE poll);

  format %{ "testl rax, [$poll]\t"
            "# Safepoint: poll for GC" %}
  ins_cost(125);
  ins_encode %{
    __ relocate(relocInfo::poll_type);
    __ testl(rax, Address($poll$$Register, 0));
  %}
  ins_pipe(ialu_reg_mem);
%}

// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  predicate(!((CallStaticJavaNode*) n)->is_method_handle_invoke());
  effect(USE meth);

  ins_cost(300);
  format %{ "call,static " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_Static_Call(meth), call_epilog);
  ins_pipe(pipe_slow);
  ins_alignment(4);
%}

// Call Java Static Instruction (method handle version)
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaHandle(method meth, rbp_RegP rbp_mh_SP_save) %{
  match(CallStaticJava);
  predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
  effect(USE meth);
  // RBP is saved by all callees (for interpreter stack correction).
  // We use it here for a similar purpose, in {preserve,restore}_SP.

  ins_cost(300);
  format %{ "call,static/MethodHandle " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(preserve_SP,
             Java_Static_Call(meth),
             restore_SP,
             call_epilog);
  ins_pipe(pipe_slow);
  ins_alignment(4);
%}

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "movq rax, #Universe::non_oop_word()\n\t"
            "call,dynamic " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_Dynamic_Call(meth), call_epilog);
  ins_pipe(pipe_slow);
  ins_alignment(4);
%}

// Call Runtime Instruction
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);
  effect(USE meth);

  ins_cost(300);
  format %{ "call,runtime " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_To_Runtime(meth));
  ins_pipe(pipe_slow);
%}

// Call runtime without safepoint
instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "call_leaf,runtime " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_To_Runtime(meth));
  ins_pipe(pipe_slow);
%}

// Call runtime without safepoint
instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "call_leaf_nofp,runtime " %}
  opcode(0xE8); /* E8 cd */
  ins_encode(Java_To_Runtime(meth));
  ins_pipe(pipe_slow);
%}

// Return Instruction
// Remove the return address & jump to it.
// Notice: We always emit a nop after a ret to make sure there is room
// for safepoint patching
instruct Ret()
%{
  match(Return);

  format %{ "ret" %}
  opcode(0xC3);
  ins_encode(OpcP);
  ins_pipe(pipe_jmp);
%}

// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(no_rbp_RegP jump_target, rbx_RegP method_oop)
%{
  match(TailCall jump_target method_oop);

  ins_cost(300);
  format %{ "jmp $jump_target\t# rbx holds method oop" %}
  opcode(0xFF, 0x4); /* Opcode FF /4 */
  ins_encode(REX_reg(jump_target), OpcP, reg_opc(jump_target));
  ins_pipe(pipe_jmp);
%}

// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
instruct tailjmpInd(no_rbp_RegP jump_target, rax_RegP ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(300);
  format %{ "popq rdx\t# pop return address\n\t"
            "jmp $jump_target" %}
  opcode(0xFF, 0x4); /* Opcode FF /4 */
  ins_encode(Opcode(0x5a), // popq rdx
             REX_reg(jump_target), OpcP, reg_opc(jump_target));
  ins_pipe(pipe_jmp);
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler.  No code emitted.
instruct CreateException(rax_RegP ex_oop)
%{
  match(Set ex_oop (CreateEx));

  size(0);
  // use the following format syntax
  format %{ "# exception oop is in rax; no code emitted" %}
  ins_encode();
  ins_pipe(empty);
%}

// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "jmp rethrow_stub" %}
  ins_encode(enc_rethrow);
  ins_pipe(pipe_jmp);
%}


//----------PEEPHOLE RULES-----------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.
//
// peepmatch ( root_instr_name [preceding_instruction]* );
//
// peepconstraint %{
// (instruction_number.operand_name relational_op instruction_number.operand_name
//  [, ...] );
// // instruction numbers are zero-based using left to right order in peepmatch
//
// peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
// // provide an instruction_number.operand_name for each operand that appears
// // in the replacement instruction's match rule
//
// ---------VM FLAGS---------------------------------------------------------
//
// All peephole optimizations can be turned off using -XX:-OptoPeephole
//
// Each peephole rule is given an identifying number starting with zero and
// increasing by one in the order seen by the parser.  An individual peephole
// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
// on the command-line.
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only match adjacent instructions in same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == RAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(rRegI dst, rRegI src)
// %{
//   match(Set dst (CopyI src));
// %}
//
// instruct incI_rReg(rRegI dst, immI1 src, rFlagsReg cr)
// %{
//   match(Set dst (AddI dst src));
//   effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
//   // increment preceded by register-register move
//   peepmatch ( incI_rReg movI );
//   // require that the destination register of the increment
//   // match the destination register of the move
//   peepconstraint ( 0.dst == 1.dst );
//   // construct a replacement instruction that sets
//   // the destination to ( move's source register + one )
//   peepreplace ( leaI_rReg_immI( 0.dst 1.src 0.src ) );
// %}
//

// Implementation no longer uses movX instructions since
// machine-independent system no longer uses CopyX nodes.
//
// peephole
// %{
//   peepmatch (incI_rReg movI);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (decI_rReg movI);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (addI_rReg_imm movI);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaI_rReg_immI(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (incL_rReg movL);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (decL_rReg movL);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (addL_rReg_imm movL);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaL_rReg_immL(0.dst 1.src 0.src));
// %}

// peephole
// %{
//   peepmatch (addP_rReg_imm movP);
//   peepconstraint (0.dst == 1.dst);
//   peepreplace (leaP_rReg_imm(0.dst 1.src 0.src));
// %}

// // Change load of spilled value to only a spill
// instruct storeI(memory mem, rRegI src)
// %{
//   match(Set mem (StoreI mem src));
// %}
//
// instruct loadI(rRegI dst, memory mem)
// %{
//   match(Set dst (LoadI mem));
// %}
//

// Replace a load that immediately follows a store of the same value to the
// same memory location with a repeat of the store (the reload is redundant).
peephole
%{
  peepmatch (loadI storeI);
  peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
  peepreplace (storeI(1.mem 1.mem 1.src));
%}

// Same store-to-load forwarding peephole for long values.
peephole
%{
  peepmatch (loadL storeL);
  peepconstraint (1.src == 0.dst, 1.mem == 0.mem);
  peepreplace (storeL(1.mem 1.mem 1.src));
%}

//----------SMARTSPILL RULES---------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instructions definitions.