1 /* 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef CPU_X86_VM_ASSEMBLER_X86_HPP 26 #define CPU_X86_VM_ASSEMBLER_X86_HPP 27 28 class BiasedLockingCounters; 29 30 // Contains all the definitions needed for x86 assembly code generation. 31 32 // Calling convention 33 class Argument VALUE_OBJ_CLASS_SPEC { 34 public: 35 enum { 36 #ifdef _LP64 37 #ifdef _WIN64 38 n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 39 n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... ) 40 #else 41 n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 42 n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... ) 43 #endif // _WIN64 44 n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ... 45 n_float_register_parameters_j = 8 // j_farg0, j_farg1, ... 46 #else 47 n_register_parameters = 0 // 0 registers used to pass arguments 48 #endif // _LP64 49 }; 50 }; 51 52 53 #ifdef _LP64 54 // Symbolically name the register arguments used by the c calling convention. 55 // Windows is different from linux/solaris. So much for standards... 56 57 #ifdef _WIN64 58 59 REGISTER_DECLARATION(Register, c_rarg0, rcx); 60 REGISTER_DECLARATION(Register, c_rarg1, rdx); 61 REGISTER_DECLARATION(Register, c_rarg2, r8); 62 REGISTER_DECLARATION(Register, c_rarg3, r9); 63 64 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); 65 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); 66 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); 67 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); 68 69 #else 70 71 REGISTER_DECLARATION(Register, c_rarg0, rdi); 72 REGISTER_DECLARATION(Register, c_rarg1, rsi); 73 REGISTER_DECLARATION(Register, c_rarg2, rdx); 74 REGISTER_DECLARATION(Register, c_rarg3, rcx); 75 REGISTER_DECLARATION(Register, c_rarg4, r8); 76 REGISTER_DECLARATION(Register, c_rarg5, r9); 77 78 REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0); 79 REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1); 80 REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2); 81 REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3); 82 REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4); 83 REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5); 84 REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6); 85 REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7); 86 87 #endif // _WIN64 88 89 // Symbolically name the register arguments used by the Java calling convention. 90 // We have control over the convention for java so we can do what we please. 91 // What pleases us is to offset the java calling convention so that when 92 // we call a suitable jni method the arguments are lined up and we don't 93 // have to do little shuffling. A suitable jni method is non-static and a 94 // small number of arguments (two fewer args on windows) 95 // 96 // |-------------------------------------------------------| 97 // | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 | 98 // |-------------------------------------------------------| 99 // | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg) 100 // | rdi rsi rdx rcx r8 r9 | solaris/linux 101 // |-------------------------------------------------------| 102 // | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 | 103 // |-------------------------------------------------------| 104 105 REGISTER_DECLARATION(Register, j_rarg0, c_rarg1); 106 REGISTER_DECLARATION(Register, j_rarg1, c_rarg2); 107 REGISTER_DECLARATION(Register, j_rarg2, c_rarg3); 108 // Windows runs out of register args here 109 #ifdef _WIN64 110 REGISTER_DECLARATION(Register, j_rarg3, rdi); 111 REGISTER_DECLARATION(Register, j_rarg4, rsi); 112 #else 113 REGISTER_DECLARATION(Register, j_rarg3, c_rarg4); 114 REGISTER_DECLARATION(Register, j_rarg4, c_rarg5); 115 #endif /* _WIN64 */ 116 REGISTER_DECLARATION(Register, j_rarg5, c_rarg0); 117 118 REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0); 119 REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1); 120 REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2); 121 REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3); 122 REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4); 123 REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5); 124 REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6); 125 REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7); 126 127 REGISTER_DECLARATION(Register, rscratch1, r10); // volatile 128 REGISTER_DECLARATION(Register, rscratch2, r11); // volatile 129 130 REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved 131 REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved 132 133 #else 134 // rscratch1 will apear in 32bit code that is dead but of course must compile 135 // Using noreg ensures if the dead code is incorrectly live and executed it 136 // will cause an assertion failure 137 #define rscratch1 noreg 138 #define rscratch2 noreg 139 140 #endif // _LP64 141 142 // JSR 292 fixed register usages: 143 REGISTER_DECLARATION(Register, rbp_mh_SP_save, rbp); 144 145 // Address is an abstraction used to represent a memory location 146 // using any of the amd64 addressing modes with one object. 147 // 148 // Note: A register location is represented via a Register, not 149 // via an address for efficiency & simplicity reasons. 150 151 class ArrayAddress; 152 153 class Address VALUE_OBJ_CLASS_SPEC { 154 public: 155 enum ScaleFactor { 156 no_scale = -1, 157 times_1 = 0, 158 times_2 = 1, 159 times_4 = 2, 160 times_8 = 3, 161 times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4) 162 }; 163 static ScaleFactor times(int size) { 164 assert(size >= 1 && size <= 8 && is_power_of_2(size), "bad scale size"); 165 if (size == 8) return times_8; 166 if (size == 4) return times_4; 167 if (size == 2) return times_2; 168 return times_1; 169 } 170 static int scale_size(ScaleFactor scale) { 171 assert(scale != no_scale, ""); 172 assert(((1 << (int)times_1) == 1 && 173 (1 << (int)times_2) == 2 && 174 (1 << (int)times_4) == 4 && 175 (1 << (int)times_8) == 8), ""); 176 return (1 << (int)scale); 177 } 178 179 private: 180 Register _base; 181 Register _index; 182 ScaleFactor _scale; 183 int _disp; 184 RelocationHolder _rspec; 185 186 // Easily misused constructors make them private 187 // %%% can we make these go away? 188 NOT_LP64(Address(address loc, RelocationHolder spec);) 189 Address(int disp, address loc, relocInfo::relocType rtype); 190 Address(int disp, address loc, RelocationHolder spec); 191 192 public: 193 194 int disp() { return _disp; } 195 // creation 196 Address() 197 : _base(noreg), 198 _index(noreg), 199 _scale(no_scale), 200 _disp(0) { 201 } 202 203 // No default displacement otherwise Register can be implicitly 204 // converted to 0(Register) which is quite a different animal. 205 206 Address(Register base, int disp) 207 : _base(base), 208 _index(noreg), 209 _scale(no_scale), 210 _disp(disp) { 211 } 212 213 Address(Register base, Register index, ScaleFactor scale, int disp = 0) 214 : _base (base), 215 _index(index), 216 _scale(scale), 217 _disp (disp) { 218 assert(!index->is_valid() == (scale == Address::no_scale), 219 "inconsistent address"); 220 } 221 222 Address(Register base, RegisterOrConstant index, ScaleFactor scale = times_1, int disp = 0) 223 : _base (base), 224 _index(index.register_or_noreg()), 225 _scale(scale), 226 _disp (disp + (index.constant_or_zero() * scale_size(scale))) { 227 if (!index.is_register()) scale = Address::no_scale; 228 assert(!_index->is_valid() == (scale == Address::no_scale), 229 "inconsistent address"); 230 } 231 232 Address plus_disp(int disp) const { 233 Address a = (*this); 234 a._disp += disp; 235 return a; 236 } 237 Address plus_disp(RegisterOrConstant disp, ScaleFactor scale = times_1) const { 238 Address a = (*this); 239 a._disp += disp.constant_or_zero() * scale_size(scale); 240 if (disp.is_register()) { 241 assert(!a.index()->is_valid(), "competing indexes"); 242 a._index = disp.as_register(); 243 a._scale = scale; 244 } 245 return a; 246 } 247 bool is_same_address(Address a) const { 248 // disregard _rspec 249 return _base == a._base && _disp == a._disp && _index == a._index && _scale == a._scale; 250 } 251 252 // The following two overloads are used in connection with the 253 // ByteSize type (see sizes.hpp). They simplify the use of 254 // ByteSize'd arguments in assembly code. Note that their equivalent 255 // for the optimized build are the member functions with int disp 256 // argument since ByteSize is mapped to an int type in that case. 257 // 258 // Note: DO NOT introduce similar overloaded functions for WordSize 259 // arguments as in the optimized mode, both ByteSize and WordSize 260 // are mapped to the same type and thus the compiler cannot make a 261 // distinction anymore (=> compiler errors). 262 263 #ifdef ASSERT 264 Address(Register base, ByteSize disp) 265 : _base(base), 266 _index(noreg), 267 _scale(no_scale), 268 _disp(in_bytes(disp)) { 269 } 270 271 Address(Register base, Register index, ScaleFactor scale, ByteSize disp) 272 : _base(base), 273 _index(index), 274 _scale(scale), 275 _disp(in_bytes(disp)) { 276 assert(!index->is_valid() == (scale == Address::no_scale), 277 "inconsistent address"); 278 } 279 280 Address(Register base, RegisterOrConstant index, ScaleFactor scale, ByteSize disp) 281 : _base (base), 282 _index(index.register_or_noreg()), 283 _scale(scale), 284 _disp (in_bytes(disp) + (index.constant_or_zero() * scale_size(scale))) { 285 if (!index.is_register()) scale = Address::no_scale; 286 assert(!_index->is_valid() == (scale == Address::no_scale), 287 "inconsistent address"); 288 } 289 290 #endif // ASSERT 291 292 // accessors 293 bool uses(Register reg) const { return _base == reg || _index == reg; } 294 Register base() const { return _base; } 295 Register index() const { return _index; } 296 ScaleFactor scale() const { return _scale; } 297 int disp() const { return _disp; } 298 299 // Convert the raw encoding form into the form expected by the constructor for 300 // Address. An index of 4 (rsp) corresponds to having no index, so convert 301 // that to noreg for the Address constructor. 302 static Address make_raw(int base, int index, int scale, int disp, bool disp_is_oop); 303 304 static Address make_array(ArrayAddress); 305 306 private: 307 bool base_needs_rex() const { 308 return _base != noreg && _base->encoding() >= 8; 309 } 310 311 bool index_needs_rex() const { 312 return _index != noreg &&_index->encoding() >= 8; 313 } 314 315 relocInfo::relocType reloc() const { return _rspec.type(); } 316 317 friend class Assembler; 318 friend class MacroAssembler; 319 friend class LIR_Assembler; // base/index/scale/disp 320 }; 321 322 // 323 // AddressLiteral has been split out from Address because operands of this type 324 // need to be treated specially on 32bit vs. 64bit platforms. By splitting it out 325 // the few instructions that need to deal with address literals are unique and the 326 // MacroAssembler does not have to implement every instruction in the Assembler 327 // in order to search for address literals that may need special handling depending 328 // on the instruction and the platform. As small step on the way to merging i486/amd64 329 // directories. 330 // 331 class AddressLiteral VALUE_OBJ_CLASS_SPEC { 332 friend class ArrayAddress; 333 RelocationHolder _rspec; 334 // Typically we use AddressLiterals we want to use their rval 335 // However in some situations we want the lval (effect address) of the item. 336 // We provide a special factory for making those lvals. 337 bool _is_lval; 338 339 // If the target is far we'll need to load the ea of this to 340 // a register to reach it. Otherwise if near we can do rip 341 // relative addressing. 342 343 address _target; 344 345 protected: 346 // creation 347 AddressLiteral() 348 : _is_lval(false), 349 _target(NULL) 350 {} 351 352 public: 353 354 355 AddressLiteral(address target, relocInfo::relocType rtype); 356 357 AddressLiteral(address target, RelocationHolder const& rspec) 358 : _rspec(rspec), 359 _is_lval(false), 360 _target(target) 361 {} 362 363 AddressLiteral addr() { 364 AddressLiteral ret = *this; 365 ret._is_lval = true; 366 return ret; 367 } 368 369 370 private: 371 372 address target() { return _target; } 373 bool is_lval() { return _is_lval; } 374 375 relocInfo::relocType reloc() const { return _rspec.type(); } 376 const RelocationHolder& rspec() const { return _rspec; } 377 378 friend class Assembler; 379 friend class MacroAssembler; 380 friend class Address; 381 friend class LIR_Assembler; 382 }; 383 384 // Convience classes 385 class RuntimeAddress: public AddressLiteral { 386 387 public: 388 389 RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {} 390 391 }; 392 393 class OopAddress: public AddressLiteral { 394 395 public: 396 397 OopAddress(address target) : AddressLiteral(target, relocInfo::oop_type){} 398 399 }; 400 401 class ExternalAddress: public AddressLiteral { 402 private: 403 static relocInfo::relocType reloc_for_target(address target) { 404 // Sometimes ExternalAddress is used for values which aren't 405 // exactly addresses, like the card table base. 406 // external_word_type can't be used for values in the first page 407 // so just skip the reloc in that case. 408 return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none; 409 } 410 411 public: 412 413 ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(target)) {} 414 415 }; 416 417 class InternalAddress: public AddressLiteral { 418 419 public: 420 421 InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {} 422 423 }; 424 425 // x86 can do array addressing as a single operation since disp can be an absolute 426 // address amd64 can't. We create a class that expresses the concept but does extra 427 // magic on amd64 to get the final result 428 429 class ArrayAddress VALUE_OBJ_CLASS_SPEC { 430 private: 431 432 AddressLiteral _base; 433 Address _index; 434 435 public: 436 437 ArrayAddress() {}; 438 ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {}; 439 AddressLiteral base() { return _base; } 440 Address index() { return _index; } 441 442 }; 443 444 const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512 / wordSize); 445 446 // The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction 447 // level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write 448 // is what you get. The Assembler is generating code into a CodeBuffer. 449 450 class Assembler : public AbstractAssembler { 451 friend class AbstractAssembler; // for the non-virtual hack 452 friend class LIR_Assembler; // as_Address() 453 friend class StubGenerator; 454 455 public: 456 enum Condition { // The x86 condition codes used for conditional jumps/moves. 457 zero = 0x4, 458 notZero = 0x5, 459 equal = 0x4, 460 notEqual = 0x5, 461 less = 0xc, 462 lessEqual = 0xe, 463 greater = 0xf, 464 greaterEqual = 0xd, 465 below = 0x2, 466 belowEqual = 0x6, 467 above = 0x7, 468 aboveEqual = 0x3, 469 overflow = 0x0, 470 noOverflow = 0x1, 471 carrySet = 0x2, 472 carryClear = 0x3, 473 negative = 0x8, 474 positive = 0x9, 475 parity = 0xa, 476 noParity = 0xb 477 }; 478 479 enum Prefix { 480 // segment overrides 481 CS_segment = 0x2e, 482 SS_segment = 0x36, 483 DS_segment = 0x3e, 484 ES_segment = 0x26, 485 FS_segment = 0x64, 486 GS_segment = 0x65, 487 488 REX = 0x40, 489 490 REX_B = 0x41, 491 REX_X = 0x42, 492 REX_XB = 0x43, 493 REX_R = 0x44, 494 REX_RB = 0x45, 495 REX_RX = 0x46, 496 REX_RXB = 0x47, 497 498 REX_W = 0x48, 499 500 REX_WB = 0x49, 501 REX_WX = 0x4A, 502 REX_WXB = 0x4B, 503 REX_WR = 0x4C, 504 REX_WRB = 0x4D, 505 REX_WRX = 0x4E, 506 REX_WRXB = 0x4F, 507 508 VEX_3bytes = 0xC4, 509 VEX_2bytes = 0xC5 510 }; 511 512 enum VexPrefix { 513 VEX_B = 0x20, 514 VEX_X = 0x40, 515 VEX_R = 0x80, 516 VEX_W = 0x80 517 }; 518 519 enum VexSimdPrefix { 520 VEX_SIMD_NONE = 0x0, 521 VEX_SIMD_66 = 0x1, 522 VEX_SIMD_F3 = 0x2, 523 VEX_SIMD_F2 = 0x3 524 }; 525 526 enum VexOpcode { 527 VEX_OPCODE_NONE = 0x0, 528 VEX_OPCODE_0F = 0x1, 529 VEX_OPCODE_0F_38 = 0x2, 530 VEX_OPCODE_0F_3A = 0x3 531 }; 532 533 enum WhichOperand { 534 // input to locate_operand, and format code for relocations 535 imm_operand = 0, // embedded 32-bit|64-bit immediate operand 536 disp32_operand = 1, // embedded 32-bit displacement or address 537 call32_operand = 2, // embedded 32-bit self-relative displacement 538 #ifndef _LP64 539 _WhichOperand_limit = 3 540 #else 541 narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop 542 _WhichOperand_limit = 4 543 #endif 544 }; 545 546 547 548 // NOTE: The general philopsophy of the declarations here is that 64bit versions 549 // of instructions are freely declared without the need for wrapping them an ifdef. 550 // (Some dangerous instructions are ifdef's out of inappropriate jvm's.) 551 // In the .cpp file the implementations are wrapped so that they are dropped out 552 // of the resulting jvm. This is done mostly to keep the footprint of KERNEL 553 // to the size it was prior to merging up the 32bit and 64bit assemblers. 554 // 555 // This does mean you'll get a linker/runtime error if you use a 64bit only instruction 556 // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down. 557 558 private: 559 560 561 // 64bit prefixes 562 int prefix_and_encode(int reg_enc, bool byteinst = false); 563 int prefixq_and_encode(int reg_enc); 564 565 int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false); 566 int prefixq_and_encode(int dst_enc, int src_enc); 567 568 void prefix(Register reg); 569 void prefix(Address adr); 570 void prefixq(Address adr); 571 572 void prefix(Address adr, Register reg, bool byteinst = false); 573 void prefix(Address adr, XMMRegister reg); 574 void prefixq(Address adr, Register reg); 575 void prefixq(Address adr, XMMRegister reg); 576 577 void prefetch_prefix(Address src); 578 579 void rex_prefix(Address adr, XMMRegister xreg, 580 VexSimdPrefix pre, VexOpcode opc, bool rex_w); 581 int rex_prefix_and_encode(int dst_enc, int src_enc, 582 VexSimdPrefix pre, VexOpcode opc, bool rex_w); 583 584 void vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, 585 int nds_enc, VexSimdPrefix pre, VexOpcode opc, 586 bool vector256); 587 588 void vex_prefix(Address adr, int nds_enc, int xreg_enc, 589 VexSimdPrefix pre, VexOpcode opc, 590 bool vex_w, bool vector256); 591 592 void vex_prefix(XMMRegister dst, XMMRegister nds, Address src, 593 VexSimdPrefix pre, bool vector256 = false) { 594 int dst_enc = dst->encoding(); 595 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 596 vex_prefix(src, nds_enc, dst_enc, pre, VEX_OPCODE_0F, false, vector256); 597 } 598 599 int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, 600 VexSimdPrefix pre, VexOpcode opc, 601 bool vex_w, bool vector256); 602 603 int vex_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, 604 VexSimdPrefix pre, bool vector256 = false, 605 VexOpcode opc = VEX_OPCODE_0F) { 606 int src_enc = src->encoding(); 607 int dst_enc = dst->encoding(); 608 int nds_enc = nds->is_valid() ? nds->encoding() : 0; 609 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, false, vector256); 610 } 611 612 void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, 613 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F, 614 bool rex_w = false, bool vector256 = false); 615 616 void simd_prefix(XMMRegister dst, Address src, 617 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) { 618 simd_prefix(dst, xnoreg, src, pre, opc); 619 } 620 621 void simd_prefix(Address dst, XMMRegister src, VexSimdPrefix pre) { 622 simd_prefix(src, dst, pre); 623 } 624 void simd_prefix_q(XMMRegister dst, XMMRegister nds, Address src, 625 VexSimdPrefix pre) { 626 bool rex_w = true; 627 simd_prefix(dst, nds, src, pre, VEX_OPCODE_0F, rex_w); 628 } 629 630 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, 631 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F, 632 bool rex_w = false, bool vector256 = false); 633 634 // Move/convert 32-bit integer value. 635 int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, Register src, 636 VexSimdPrefix pre) { 637 // It is OK to cast from Register to XMMRegister to pass argument here 638 // since only encoding is used in simd_prefix_and_encode() and number of 639 // Gen and Xmm registers are the same. 640 return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre); 641 } 642 int simd_prefix_and_encode(XMMRegister dst, Register src, VexSimdPrefix pre) { 643 return simd_prefix_and_encode(dst, xnoreg, src, pre); 644 } 645 int simd_prefix_and_encode(Register dst, XMMRegister src, 646 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) { 647 return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc); 648 } 649 650 // Move/convert 64-bit integer value. 651 int simd_prefix_and_encode_q(XMMRegister dst, XMMRegister nds, Register src, 652 VexSimdPrefix pre) { 653 bool rex_w = true; 654 return simd_prefix_and_encode(dst, nds, as_XMMRegister(src->encoding()), pre, VEX_OPCODE_0F, rex_w); 655 } 656 int simd_prefix_and_encode_q(XMMRegister dst, Register src, VexSimdPrefix pre) { 657 return simd_prefix_and_encode_q(dst, xnoreg, src, pre); 658 } 659 int simd_prefix_and_encode_q(Register dst, XMMRegister src, 660 VexSimdPrefix pre, VexOpcode opc = VEX_OPCODE_0F) { 661 bool rex_w = true; 662 return simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, pre, opc, rex_w); 663 } 664 665 // Helper functions for groups of instructions 666 void emit_arith_b(int op1, int op2, Register dst, int imm8); 667 668 void emit_arith(int op1, int op2, Register dst, int32_t imm32); 669 // Force generation of a 4 byte immediate value even if it fits into 8bit 670 void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32); 671 // only 32bit?? 672 void emit_arith(int op1, int op2, Register dst, jobject obj); 673 void emit_arith(int op1, int op2, Register dst, Register src); 674 675 void emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre); 676 void emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre); 677 void emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre); 678 void emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre); 679 void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, 680 Address src, VexSimdPrefix pre, bool vector256); 681 void emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds, 682 XMMRegister src, VexSimdPrefix pre, bool vector256); 683 684 void emit_operand(Register reg, 685 Register base, Register index, Address::ScaleFactor scale, 686 int disp, 687 RelocationHolder const& rspec, 688 int rip_relative_correction = 0); 689 690 void emit_operand(Register reg, Address adr, int rip_relative_correction = 0); 691 692 // operands that only take the original 32bit registers 693 void emit_operand32(Register reg, Address adr); 694 695 void emit_operand(XMMRegister reg, 696 Register base, Register index, Address::ScaleFactor scale, 697 int disp, 698 RelocationHolder const& rspec); 699 700 void emit_operand(XMMRegister reg, Address adr); 701 702 void emit_operand(MMXRegister reg, Address adr); 703 704 // workaround gcc (3.2.1-7) bug 705 void emit_operand(Address adr, MMXRegister reg); 706 707 708 // Immediate-to-memory forms 709 void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32); 710 711 void emit_farith(int b1, int b2, int i); 712 713 714 protected: 715 #ifdef ASSERT 716 void check_relocation(RelocationHolder const& rspec, int format); 717 #endif 718 719 inline void emit_long64(jlong x); 720 721 void emit_data(jint data, relocInfo::relocType rtype, int format); 722 void emit_data(jint data, RelocationHolder const& rspec, int format); 723 void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0); 724 void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0); 725 726 bool reachable(AddressLiteral adr) NOT_LP64({ return true;}); 727 728 // These are all easily abused and hence protected 729 730 // 32BIT ONLY SECTION 731 #ifndef _LP64 732 // Make these disappear in 64bit mode since they would never be correct 733 void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 734 void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 735 736 void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 737 void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 738 739 void push_literal32(int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY 740 #else 741 // 64BIT ONLY SECTION 742 void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); // 64BIT ONLY 743 744 void cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec); 745 void cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec); 746 747 void mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec); 748 void mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec); 749 #endif // _LP64 750 751 // These are unique in that we are ensured by the caller that the 32bit 752 // relative in these instructions will always be able to reach the potentially 753 // 64bit address described by entry. Since they can take a 64bit address they 754 // don't have the 32 suffix like the other instructions in this class. 755 756 void call_literal(address entry, RelocationHolder const& rspec); 757 void jmp_literal(address entry, RelocationHolder const& rspec); 758 759 // Avoid using directly section 760 // Instructions in this section are actually usable by anyone without danger 761 // of failure but have performance issues that are addressed my enhanced 762 // instructions which will do the proper thing base on the particular cpu. 763 // We protect them because we don't trust you... 764 765 // Don't use next inc() and dec() methods directly. INC & DEC instructions 766 // could cause a partial flag stall since they don't set CF flag. 767 // Use MacroAssembler::decrement() & MacroAssembler::increment() methods 768 // which call inc() & dec() or add() & sub() in accordance with 769 // the product flag UseIncDec value. 770 771 void decl(Register dst); 772 void decl(Address dst); 773 void decq(Register dst); 774 void decq(Address dst); 775 776 void incl(Register dst); 777 void incl(Address dst); 778 void incq(Register dst); 779 void incq(Address dst); 780 781 // New cpus require use of movsd and movss to avoid partial register stall 782 // when loading from memory. But for old Opteron use movlpd instead of movsd. 783 // The selection is done in MacroAssembler::movdbl() and movflt(). 784 785 // Move Scalar Single-Precision Floating-Point Values 786 void movss(XMMRegister dst, Address src); 787 void movss(XMMRegister dst, XMMRegister src); 788 void movss(Address dst, XMMRegister src); 789 790 // Move Scalar Double-Precision Floating-Point Values 791 void movsd(XMMRegister dst, Address src); 792 void movsd(XMMRegister dst, XMMRegister src); 793 void movsd(Address dst, XMMRegister src); 794 void movlpd(XMMRegister dst, Address src); 795 796 // New cpus require use of movaps and movapd to avoid partial register stall 797 // when moving between registers. 798 void movaps(XMMRegister dst, XMMRegister src); 799 void movapd(XMMRegister dst, XMMRegister src); 800 801 // End avoid using directly 802 803 804 // Instruction prefixes 805 void prefix(Prefix p); 806 807 public: 808 809 // Creation 810 Assembler(CodeBuffer* code) : AbstractAssembler(code) {} 811 812 // Decoding 813 static address locate_operand(address inst, WhichOperand which); 814 static address locate_next_instruction(address inst); 815 816 // Utilities 817 static bool is_polling_page_far() NOT_LP64({ return false;}); 818 819 // Generic instructions 820 // Does 32bit or 64bit as needed for the platform. In some sense these 821 // belong in macro assembler but there is no need for both varieties to exist 822 823 void lea(Register dst, Address src); 824 825 void mov(Register dst, Register src); 826 827 void pusha(); 828 void popa(); 829 830 void pushf(); 831 void popf(); 832 833 void push(int32_t imm32); 834 835 void push(Register src); 836 837 void pop(Register dst); 838 839 // These are dummies to prevent surprise implicit conversions to Register 840 void push(void* v); 841 void pop(void* v); 842 843 // These do register sized moves/scans 844 void rep_mov(); 845 void rep_set(); 846 void repne_scan(); 847 #ifdef _LP64 848 void repne_scanl(); 849 #endif 850 851 // Vanilla instructions in lexical order 852 853 void adcl(Address dst, int32_t imm32); 854 void adcl(Address dst, Register src); 855 void adcl(Register dst, int32_t imm32); 856 void adcl(Register dst, Address src); 857 void adcl(Register dst, Register src); 858 859 void adcq(Register dst, int32_t imm32); 860 void adcq(Register dst, Address src); 861 void adcq(Register dst, Register src); 862 863 void addl(Address dst, int32_t imm32); 864 void addl(Address dst, Register src); 865 void addl(Register dst, int32_t imm32); 866 void addl(Register dst, Address src); 867 void addl(Register dst, Register src); 868 869 void addq(Address dst, int32_t imm32); 870 void addq(Address dst, Register src); 871 void addq(Register dst, int32_t imm32); 872 void addq(Register dst, Address src); 873 void addq(Register dst, Register src); 874 875 void addr_nop_4(); 876 void addr_nop_5(); 877 void addr_nop_7(); 878 void addr_nop_8(); 879 880 // Add Scalar Double-Precision Floating-Point Values 881 void addsd(XMMRegister dst, Address src); 882 void addsd(XMMRegister dst, XMMRegister src); 883 884 // Add Scalar Single-Precision Floating-Point Values 885 void addss(XMMRegister dst, Address src); 886 void addss(XMMRegister dst, XMMRegister src); 887 888 void andl(Address dst, int32_t imm32); 889 void andl(Register dst, int32_t imm32); 890 void andl(Register dst, Address src); 891 void andl(Register dst, Register src); 892 893 void andq(Address dst, int32_t imm32); 894 void andq(Register dst, int32_t imm32); 895 void andq(Register dst, Address src); 896 void andq(Register dst, Register src); 897 898 void bsfl(Register dst, Register src); 899 void bsrl(Register dst, Register src); 900 901 #ifdef _LP64 902 void bsfq(Register dst, Register src); 903 void bsrq(Register dst, Register src); 904 #endif 905 906 void bswapl(Register reg); 907 908 void bswapq(Register reg); 909 910 void call(Label& L, relocInfo::relocType rtype); 911 void call(Register reg); // push pc; pc <- reg 912 void call(Address adr); // push pc; pc <- adr 913 914 void cdql(); 915 916 void cdqq(); 917 918 void cld() { emit_byte(0xfc); } 919 920 void clflush(Address adr); 921 922 void cmovl(Condition cc, Register dst, Register src); 923 void cmovl(Condition cc, Register dst, Address src); 924 925 void cmovq(Condition cc, Register dst, Register src); 926 void cmovq(Condition cc, Register dst, Address src); 927 928 929 void cmpb(Address dst, int imm8); 930 931 void cmpl(Address dst, int32_t imm32); 932 933 void cmpl(Register dst, int32_t imm32); 934 void cmpl(Register dst, Register src); 935 void cmpl(Register dst, Address src); 936 937 void cmpq(Address dst, int32_t imm32); 938 void cmpq(Address dst, Register src); 939 940 void cmpq(Register dst, int32_t imm32); 941 void cmpq(Register dst, Register src); 942 void cmpq(Register dst, Address src); 943 944 // these are dummies used to catch attempting to convert NULL to Register 945 void cmpl(Register dst, void* junk); // dummy 946 void cmpq(Register dst, void* junk); // dummy 947 948 void cmpw(Address dst, int imm16); 949 950 void cmpxchg8 (Address adr); 951 952 void cmpxchgl(Register reg, Address adr); 953 954 void cmpxchgq(Register reg, Address adr); 955 956 // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS 957 void comisd(XMMRegister dst, Address src); 958 void comisd(XMMRegister dst, XMMRegister src); 959 960 // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS 961 void comiss(XMMRegister dst, Address src); 962 void comiss(XMMRegister dst, XMMRegister src); 963 964 // Identify processor type and features 965 void cpuid() { 966 emit_byte(0x0F); 967 emit_byte(0xA2); 968 } 969 970 // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value 971 void cvtsd2ss(XMMRegister dst, XMMRegister src); 972 void cvtsd2ss(XMMRegister dst, Address src); 973 974 // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value 975 void cvtsi2sdl(XMMRegister dst, Register src); 976 void cvtsi2sdl(XMMRegister dst, Address src); 977 void cvtsi2sdq(XMMRegister dst, Register src); 978 void cvtsi2sdq(XMMRegister dst, Address src); 979 980 // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value 981 void cvtsi2ssl(XMMRegister dst, Register src); 982 void cvtsi2ssl(XMMRegister dst, Address src); 983 void cvtsi2ssq(XMMRegister dst, Register src); 984 void cvtsi2ssq(XMMRegister dst, Address src); 985 986 // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value 987 void cvtdq2pd(XMMRegister dst, XMMRegister src); 988 989 // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value 990 void cvtdq2ps(XMMRegister dst, XMMRegister src); 991 992 // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value 993 void cvtss2sd(XMMRegister dst, XMMRegister src); 994 void cvtss2sd(XMMRegister dst, Address src); 995 996 // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer 997 void cvttsd2sil(Register dst, Address src); 998 void cvttsd2sil(Register dst, XMMRegister src); 999 void cvttsd2siq(Register dst, XMMRegister src); 1000 1001 // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer 1002 void cvttss2sil(Register dst, XMMRegister src); 1003 void cvttss2siq(Register dst, XMMRegister src); 1004 1005 // Divide Scalar Double-Precision Floating-Point Values 1006 void divsd(XMMRegister dst, Address src); 1007 void divsd(XMMRegister dst, XMMRegister src); 1008 1009 // Divide Scalar Single-Precision Floating-Point Values 1010 void divss(XMMRegister dst, Address src); 1011 void divss(XMMRegister dst, XMMRegister src); 1012 1013 void emms(); 1014 1015 void fabs(); 1016 1017 void fadd(int i); 1018 1019 void fadd_d(Address src); 1020 void fadd_s(Address src); 1021 1022 // "Alternate" versions of x87 instructions place result down in FPU 1023 // stack instead of on TOS 1024 1025 void fadda(int i); // "alternate" fadd 1026 void faddp(int i = 1); 1027 1028 void fchs(); 1029 1030 void fcom(int i); 1031 1032 void fcomp(int i = 1); 1033 void fcomp_d(Address src); 1034 void fcomp_s(Address src); 1035 1036 void fcompp(); 1037 1038 void fcos(); 1039 1040 void fdecstp(); 1041 1042 void fdiv(int i); 1043 void fdiv_d(Address src); 1044 void fdivr_s(Address src); 1045 void fdiva(int i); // "alternate" fdiv 1046 void fdivp(int i = 1); 1047 1048 void fdivr(int i); 1049 void fdivr_d(Address src); 1050 void fdiv_s(Address src); 1051 1052 void fdivra(int i); // "alternate" reversed fdiv 1053 1054 void fdivrp(int i = 1); 1055 1056 void ffree(int i = 0); 1057 1058 void fild_d(Address adr); 1059 void fild_s(Address adr); 1060 1061 void fincstp(); 1062 1063 void finit(); 1064 1065 void fist_s (Address adr); 1066 void fistp_d(Address adr); 1067 void fistp_s(Address adr); 1068 1069 void fld1(); 1070 1071 void fld_d(Address adr); 1072 void fld_s(Address adr); 1073 void fld_s(int index); 1074 void fld_x(Address adr); // extended-precision (80-bit) format 1075 1076 void fldcw(Address src); 1077 1078 void fldenv(Address src); 1079 1080 void fldlg2(); 1081 1082 void fldln2(); 1083 1084 void fldz(); 1085 1086 void flog(); 1087 void flog10(); 1088 1089 void fmul(int i); 1090 1091 void fmul_d(Address src); 1092 void fmul_s(Address src); 1093 1094 void fmula(int i); // "alternate" fmul 1095 1096 void fmulp(int i = 1); 1097 1098 void fnsave(Address dst); 1099 1100 void fnstcw(Address src); 1101 1102 void fnstsw_ax(); 1103 1104 void fprem(); 1105 void fprem1(); 1106 1107 void frstor(Address src); 1108 1109 void fsin(); 1110 1111 void fsqrt(); 1112 1113 void fst_d(Address adr); 1114 void fst_s(Address adr); 1115 1116 void fstp_d(Address adr); 1117 void fstp_d(int index); 1118 void fstp_s(Address adr); 1119 void fstp_x(Address adr); // extended-precision (80-bit) format 1120 1121 void fsub(int i); 1122 void fsub_d(Address src); 1123 void fsub_s(Address src); 1124 1125 void fsuba(int i); // "alternate" fsub 1126 1127 void fsubp(int i = 1); 1128 1129 void fsubr(int i); 1130 void fsubr_d(Address src); 1131 void fsubr_s(Address src); 1132 1133 void fsubra(int i); // "alternate" reversed fsub 1134 1135 void fsubrp(int i = 1); 1136 1137 void ftan(); 1138 1139 void ftst(); 1140 1141 void fucomi(int i = 1); 1142 void fucomip(int i = 1); 1143 1144 void fwait(); 1145 1146 void fxch(int i = 1); 1147 1148 void fxrstor(Address src); 1149 1150 void fxsave(Address dst); 1151 1152 void fyl2x(); 1153 void frndint(); 1154 void f2xm1(); 1155 void fldl2e(); 1156 1157 void hlt(); 1158 1159 void idivl(Register src); 1160 void divl(Register src); // Unsigned division 1161 1162 void idivq(Register src); 1163 1164 void imull(Register dst, Register src); 1165 void imull(Register dst, Register src, int value); 1166 1167 void imulq(Register dst, Register src); 1168 void imulq(Register dst, Register src, int value); 1169 1170 1171 // jcc is the generic conditional branch generator to run- 1172 // time routines, jcc is used for branches to labels. jcc 1173 // takes a branch opcode (cc) and a label (L) and generates 1174 // either a backward branch or a forward branch and links it 1175 // to the label fixup chain. Usage: 1176 // 1177 // Label L; // unbound label 1178 // jcc(cc, L); // forward branch to unbound label 1179 // bind(L); // bind label to the current pc 1180 // jcc(cc, L); // backward branch to bound label 1181 // bind(L); // illegal: a label may be bound only once 1182 // 1183 // Note: The same Label can be used for forward and backward branches 1184 // but it may be bound only once. 1185 1186 void jcc(Condition cc, Label& L, bool maybe_short = true); 1187 1188 // Conditional jump to a 8-bit offset to L. 1189 // WARNING: be very careful using this for forward jumps. If the label is 1190 // not bound within an 8-bit offset of this instruction, a run-time error 1191 // will occur. 1192 void jccb(Condition cc, Label& L); 1193 1194 void jmp(Address entry); // pc <- entry 1195 1196 // Label operations & relative jumps (PPUM Appendix D) 1197 void jmp(Label& L, bool maybe_short = true); // unconditional jump to L 1198 1199 void jmp(Register entry); // pc <- entry 1200 1201 // Unconditional 8-bit offset jump to L. 1202 // WARNING: be very careful using this for forward jumps. If the label is 1203 // not bound within an 8-bit offset of this instruction, a run-time error 1204 // will occur. 1205 void jmpb(Label& L); 1206 1207 void ldmxcsr( Address src ); 1208 1209 void leal(Register dst, Address src); 1210 1211 void leaq(Register dst, Address src); 1212 1213 void lfence() { 1214 emit_byte(0x0F); 1215 emit_byte(0xAE); 1216 emit_byte(0xE8); 1217 } 1218 1219 void lock(); 1220 1221 void lzcntl(Register dst, Register src); 1222 1223 #ifdef _LP64 1224 void lzcntq(Register dst, Register src); 1225 #endif 1226 1227 enum Membar_mask_bits { 1228 StoreStore = 1 << 3, 1229 LoadStore = 1 << 2, 1230 StoreLoad = 1 << 1, 1231 LoadLoad = 1 << 0 1232 }; 1233 1234 // Serializes memory and blows flags 1235 void membar(Membar_mask_bits order_constraint) { 1236 if (os::is_MP()) { 1237 // We only have to handle StoreLoad 1238 if (order_constraint & StoreLoad) { 1239 // All usable chips support "locked" instructions which suffice 1240 // as barriers, and are much faster than the alternative of 1241 // using cpuid instruction. We use here a locked add [esp],0. 1242 // This is conveniently otherwise a no-op except for blowing 1243 // flags. 1244 // Any change to this code may need to revisit other places in 1245 // the code where this idiom is used, in particular the 1246 // orderAccess code. 1247 lock(); 1248 addl(Address(rsp, 0), 0);// Assert the lock# signal here 1249 } 1250 } 1251 } 1252 1253 void mfence(); 1254 1255 // Moves 1256 1257 void mov64(Register dst, int64_t imm64); 1258 1259 void movb(Address dst, Register src); 1260 void movb(Address dst, int imm8); 1261 void movb(Register dst, Address src); 1262 1263 void movdl(XMMRegister dst, Register src); 1264 void movdl(Register dst, XMMRegister src); 1265 void movdl(XMMRegister dst, Address src); 1266 void movdl(Address dst, XMMRegister src); 1267 1268 // Move Double Quadword 1269 void movdq(XMMRegister dst, Register src); 1270 void movdq(Register dst, XMMRegister src); 1271 1272 // Move Aligned Double Quadword 1273 void movdqa(XMMRegister dst, XMMRegister src); 1274 1275 // Move Unaligned Double Quadword 1276 void movdqu(Address dst, XMMRegister src); 1277 void movdqu(XMMRegister dst, Address src); 1278 void movdqu(XMMRegister dst, XMMRegister src); 1279 1280 // Move Unaligned 256bit Vector 1281 void vmovdqu(Address dst, XMMRegister src); 1282 void vmovdqu(XMMRegister dst, Address src); 1283 void vmovdqu(XMMRegister dst, XMMRegister src); 1284 1285 // Move lower 64bit to high 64bit in 128bit register 1286 void movlhps(XMMRegister dst, XMMRegister src); 1287 1288 void movl(Register dst, int32_t imm32); 1289 void movl(Address dst, int32_t imm32); 1290 void movl(Register dst, Register src); 1291 void movl(Register dst, Address src); 1292 void movl(Address dst, Register src); 1293 1294 // These dummies prevent using movl from converting a zero (like NULL) into Register 1295 // by giving the compiler two choices it can't resolve 1296 1297 void movl(Address dst, void* junk); 1298 void movl(Register dst, void* junk); 1299 1300 #ifdef _LP64 1301 void movq(Register dst, Register src); 1302 void movq(Register dst, Address src); 1303 void movq(Address dst, Register src); 1304 #endif 1305 1306 void movq(Address dst, MMXRegister src ); 1307 void movq(MMXRegister dst, Address src ); 1308 1309 #ifdef _LP64 1310 // These dummies prevent using movq from converting a zero (like NULL) into Register 1311 // by giving the compiler two choices it can't resolve 1312 1313 void movq(Address dst, void* dummy); 1314 void movq(Register dst, void* dummy); 1315 #endif 1316 1317 // Move Quadword 1318 void movq(Address dst, XMMRegister src); 1319 void movq(XMMRegister dst, Address src); 1320 1321 void movsbl(Register dst, Address src); 1322 void movsbl(Register dst, Register src); 1323 1324 #ifdef _LP64 1325 void movsbq(Register dst, Address src); 1326 void movsbq(Register dst, Register src); 1327 1328 // Move signed 32bit immediate to 64bit extending sign 1329 void movslq(Address dst, int32_t imm64); 1330 void movslq(Register dst, int32_t imm64); 1331 1332 void movslq(Register dst, Address src); 1333 void movslq(Register dst, Register src); 1334 void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous 1335 #endif 1336 1337 void movswl(Register dst, Address src); 1338 void movswl(Register dst, Register src); 1339 1340 #ifdef _LP64 1341 void movswq(Register dst, Address src); 1342 void movswq(Register dst, Register src); 1343 #endif 1344 1345 void movw(Address dst, int imm16); 1346 void movw(Register dst, Address src); 1347 void movw(Address dst, Register src); 1348 1349 void movzbl(Register dst, Address src); 1350 void movzbl(Register dst, Register src); 1351 1352 #ifdef _LP64 1353 void movzbq(Register dst, Address src); 1354 void movzbq(Register dst, Register src); 1355 #endif 1356 1357 void movzwl(Register dst, Address src); 1358 void movzwl(Register dst, Register src); 1359 1360 #ifdef _LP64 1361 void movzwq(Register dst, Address src); 1362 void movzwq(Register dst, Register src); 1363 #endif 1364 1365 void mull(Address src); 1366 void mull(Register src); 1367 1368 // Multiply Scalar Double-Precision Floating-Point Values 1369 void mulsd(XMMRegister dst, Address src); 1370 void mulsd(XMMRegister dst, XMMRegister src); 1371 1372 // Multiply Scalar Single-Precision Floating-Point Values 1373 void mulss(XMMRegister dst, Address src); 1374 void mulss(XMMRegister dst, XMMRegister src); 1375 1376 void negl(Register dst); 1377 1378 #ifdef _LP64 1379 void negq(Register dst); 1380 #endif 1381 1382 void nop(int i = 1); 1383 1384 void notl(Register dst); 1385 1386 #ifdef _LP64 1387 void notq(Register dst); 1388 #endif 1389 1390 void orl(Address dst, int32_t imm32); 1391 void orl(Register dst, int32_t imm32); 1392 void orl(Register dst, Address src); 1393 void orl(Register dst, Register src); 1394 1395 void orq(Address dst, int32_t imm32); 1396 void orq(Register dst, int32_t imm32); 1397 void orq(Register dst, Address src); 1398 void orq(Register dst, Register src); 1399 1400 // Pack with unsigned saturation 1401 void packuswb(XMMRegister dst, XMMRegister src); 1402 void packuswb(XMMRegister dst, Address src); 1403 1404 // SSE4.2 string instructions 1405 void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8); 1406 void pcmpestri(XMMRegister xmm1, Address src, int imm8); 1407 1408 // SSE4.1 packed move 1409 void pmovzxbw(XMMRegister dst, XMMRegister src); 1410 void pmovzxbw(XMMRegister dst, Address src); 1411 1412 #ifndef _LP64 // no 32bit push/pop on amd64 1413 void popl(Address dst); 1414 #endif 1415 1416 #ifdef _LP64 1417 void popq(Address dst); 1418 #endif 1419 1420 void popcntl(Register dst, Address src); 1421 void popcntl(Register dst, Register src); 1422 1423 #ifdef _LP64 1424 void popcntq(Register dst, Address src); 1425 void popcntq(Register dst, Register src); 1426 #endif 1427 1428 // Prefetches (SSE, SSE2, 3DNOW only) 1429 1430 void prefetchnta(Address src); 1431 void prefetchr(Address src); 1432 void prefetcht0(Address src); 1433 void prefetcht1(Address src); 1434 void prefetcht2(Address src); 1435 void prefetchw(Address src); 1436 1437 // Shuffle Packed Doublewords 1438 void pshufd(XMMRegister dst, XMMRegister src, int mode); 1439 void pshufd(XMMRegister dst, Address src, int mode); 1440 1441 // Shuffle Packed Low Words 1442 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 1443 void pshuflw(XMMRegister dst, Address src, int mode); 1444 1445 // Shift Right by bytes Logical DoubleQuadword Immediate 1446 void psrldq(XMMRegister dst, int shift); 1447 1448 // Logical Compare Double Quadword 1449 void ptest(XMMRegister dst, XMMRegister src); 1450 void ptest(XMMRegister dst, Address src); 1451 1452 // Interleave Low Bytes 1453 void punpcklbw(XMMRegister dst, XMMRegister src); 1454 void punpcklbw(XMMRegister dst, Address src); 1455 1456 // Interleave Low Doublewords 1457 void punpckldq(XMMRegister dst, XMMRegister src); 1458 void punpckldq(XMMRegister dst, Address src); 1459 1460 // Interleave Low Quadwords 1461 void punpcklqdq(XMMRegister dst, XMMRegister src); 1462 1463 #ifndef _LP64 // no 32bit push/pop on amd64 1464 void pushl(Address src); 1465 #endif 1466 1467 void pushq(Address src); 1468 1469 void rcll(Register dst, int imm8); 1470 1471 void rclq(Register dst, int imm8); 1472 1473 void ret(int imm16); 1474 1475 void sahf(); 1476 1477 void sarl(Register dst, int imm8); 1478 void sarl(Register dst); 1479 1480 void sarq(Register dst, int imm8); 1481 void sarq(Register dst); 1482 1483 void sbbl(Address dst, int32_t imm32); 1484 void sbbl(Register dst, int32_t imm32); 1485 void sbbl(Register dst, Address src); 1486 void sbbl(Register dst, Register src); 1487 1488 void sbbq(Address dst, int32_t imm32); 1489 void sbbq(Register dst, int32_t imm32); 1490 void sbbq(Register dst, Address src); 1491 void sbbq(Register dst, Register src); 1492 1493 void setb(Condition cc, Register dst); 1494 1495 void shldl(Register dst, Register src); 1496 1497 void shll(Register dst, int imm8); 1498 void shll(Register dst); 1499 1500 void shlq(Register dst, int imm8); 1501 void shlq(Register dst); 1502 1503 void shrdl(Register dst, Register src); 1504 1505 void shrl(Register dst, int imm8); 1506 void shrl(Register dst); 1507 1508 void shrq(Register dst, int imm8); 1509 void shrq(Register dst); 1510 1511 void smovl(); // QQQ generic? 1512 1513 // Compute Square Root of Scalar Double-Precision Floating-Point Value 1514 void sqrtsd(XMMRegister dst, Address src); 1515 void sqrtsd(XMMRegister dst, XMMRegister src); 1516 1517 // Compute Square Root of Scalar Single-Precision Floating-Point Value 1518 void sqrtss(XMMRegister dst, Address src); 1519 void sqrtss(XMMRegister dst, XMMRegister src); 1520 1521 void std() { emit_byte(0xfd); } 1522 1523 void stmxcsr( Address dst ); 1524 1525 void subl(Address dst, int32_t imm32); 1526 void subl(Address dst, Register src); 1527 void subl(Register dst, int32_t imm32); 1528 void subl(Register dst, Address src); 1529 void subl(Register dst, Register src); 1530 1531 void subq(Address dst, int32_t imm32); 1532 void subq(Address dst, Register src); 1533 void subq(Register dst, int32_t imm32); 1534 void subq(Register dst, Address src); 1535 void subq(Register dst, Register src); 1536 1537 // Force generation of a 4 byte immediate value even if it fits into 8bit 1538 void subl_imm32(Register dst, int32_t imm32); 1539 void subq_imm32(Register dst, int32_t imm32); 1540 1541 // Subtract Scalar Double-Precision Floating-Point Values 1542 void subsd(XMMRegister dst, Address src); 1543 void subsd(XMMRegister dst, XMMRegister src); 1544 1545 // Subtract Scalar Single-Precision Floating-Point Values 1546 void subss(XMMRegister dst, Address src); 1547 void subss(XMMRegister dst, XMMRegister src); 1548 1549 void testb(Register dst, int imm8); 1550 1551 void testl(Register dst, int32_t imm32); 1552 void testl(Register dst, Register src); 1553 void testl(Register dst, Address src); 1554 1555 void testq(Register dst, int32_t imm32); 1556 void testq(Register dst, Register src); 1557 1558 1559 // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS 1560 void ucomisd(XMMRegister dst, Address src); 1561 void ucomisd(XMMRegister dst, XMMRegister src); 1562 1563 // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS 1564 void ucomiss(XMMRegister dst, Address src); 1565 void ucomiss(XMMRegister dst, XMMRegister src); 1566 1567 void xaddl(Address dst, Register src); 1568 1569 void xaddq(Address dst, Register src); 1570 1571 void xchgl(Register reg, Address adr); 1572 void xchgl(Register dst, Register src); 1573 1574 void xchgq(Register reg, Address adr); 1575 void xchgq(Register dst, Register src); 1576 1577 // Get Value of Extended Control Register 1578 void xgetbv() { 1579 emit_byte(0x0F); 1580 emit_byte(0x01); 1581 emit_byte(0xD0); 1582 } 1583 1584 void xorl(Register dst, int32_t imm32); 1585 void xorl(Register dst, Address src); 1586 void xorl(Register dst, Register src); 1587 1588 void xorq(Register dst, Address src); 1589 void xorq(Register dst, Register src); 1590 1591 void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0 1592 1593 // AVX 3-operands scalar instructions (encoded with VEX prefix) 1594 1595 void vaddsd(XMMRegister dst, XMMRegister nds, Address src); 1596 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 1597 void vaddss(XMMRegister dst, XMMRegister nds, Address src); 1598 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src); 1599 void vdivsd(XMMRegister dst, XMMRegister nds, Address src); 1600 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 1601 void vdivss(XMMRegister dst, XMMRegister nds, Address src); 1602 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src); 1603 void vmulsd(XMMRegister dst, XMMRegister nds, Address src); 1604 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 1605 void vmulss(XMMRegister dst, XMMRegister nds, Address src); 1606 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src); 1607 void vsubsd(XMMRegister dst, XMMRegister nds, Address src); 1608 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src); 1609 void vsubss(XMMRegister dst, XMMRegister nds, Address src); 1610 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src); 1611 1612 1613 //====================VECTOR ARITHMETIC===================================== 1614 1615 // Add Packed Floating-Point Values 1616 void addpd(XMMRegister dst, XMMRegister src); 1617 void addps(XMMRegister dst, XMMRegister src); 1618 void vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1619 void vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1620 void vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1621 void vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1622 1623 // Subtract Packed Floating-Point Values 1624 void subpd(XMMRegister dst, XMMRegister src); 1625 void subps(XMMRegister dst, XMMRegister src); 1626 void vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1627 void vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1628 void vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1629 void vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1630 1631 // Multiply Packed Floating-Point Values 1632 void mulpd(XMMRegister dst, XMMRegister src); 1633 void mulps(XMMRegister dst, XMMRegister src); 1634 void vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1635 void vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1636 void vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1637 void vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1638 1639 // Divide Packed Floating-Point Values 1640 void divpd(XMMRegister dst, XMMRegister src); 1641 void divps(XMMRegister dst, XMMRegister src); 1642 void vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1643 void vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1644 void vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1645 void vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1646 1647 // Bitwise Logical AND of Packed Floating-Point Values 1648 void andpd(XMMRegister dst, XMMRegister src); 1649 void andps(XMMRegister dst, XMMRegister src); 1650 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1651 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1652 void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1653 void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1654 1655 // Bitwise Logical XOR of Packed Floating-Point Values 1656 void xorpd(XMMRegister dst, XMMRegister src); 1657 void xorps(XMMRegister dst, XMMRegister src); 1658 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1659 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1660 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1661 void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1662 1663 // Add packed integers 1664 void paddb(XMMRegister dst, XMMRegister src); 1665 void paddw(XMMRegister dst, XMMRegister src); 1666 void paddd(XMMRegister dst, XMMRegister src); 1667 void paddq(XMMRegister dst, XMMRegister src); 1668 void vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1669 void vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1670 void vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1671 void vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1672 void vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1673 void vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1674 void vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1675 void vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1676 1677 // Sub packed integers 1678 void psubb(XMMRegister dst, XMMRegister src); 1679 void psubw(XMMRegister dst, XMMRegister src); 1680 void psubd(XMMRegister dst, XMMRegister src); 1681 void psubq(XMMRegister dst, XMMRegister src); 1682 void vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1683 void vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1684 void vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1685 void vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1686 void vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1687 void vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1688 void vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1689 void vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1690 1691 // Multiply packed integers (only shorts and ints) 1692 void pmullw(XMMRegister dst, XMMRegister src); 1693 void pmulld(XMMRegister dst, XMMRegister src); 1694 void vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1695 void vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1696 void vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1697 void vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1698 1699 // Shift left packed integers 1700 void psllw(XMMRegister dst, int shift); 1701 void pslld(XMMRegister dst, int shift); 1702 void psllq(XMMRegister dst, int shift); 1703 void psllw(XMMRegister dst, XMMRegister shift); 1704 void pslld(XMMRegister dst, XMMRegister shift); 1705 void psllq(XMMRegister dst, XMMRegister shift); 1706 void vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256); 1707 void vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256); 1708 void vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256); 1709 void vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); 1710 void vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); 1711 void vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); 1712 1713 // Logical shift right packed integers 1714 void psrlw(XMMRegister dst, int shift); 1715 void psrld(XMMRegister dst, int shift); 1716 void psrlq(XMMRegister dst, int shift); 1717 void psrlw(XMMRegister dst, XMMRegister shift); 1718 void psrld(XMMRegister dst, XMMRegister shift); 1719 void psrlq(XMMRegister dst, XMMRegister shift); 1720 void vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256); 1721 void vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256); 1722 void vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256); 1723 void vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); 1724 void vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); 1725 void vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); 1726 1727 // Arithmetic shift right packed integers (only shorts and ints, no instructions for longs) 1728 void psraw(XMMRegister dst, int shift); 1729 void psrad(XMMRegister dst, int shift); 1730 void psraw(XMMRegister dst, XMMRegister shift); 1731 void psrad(XMMRegister dst, XMMRegister shift); 1732 void vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256); 1733 void vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256); 1734 void vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); 1735 void vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256); 1736 1737 // And packed integers 1738 void pand(XMMRegister dst, XMMRegister src); 1739 void vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1740 void vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1741 1742 // Or packed integers 1743 void por(XMMRegister dst, XMMRegister src); 1744 void vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1745 void vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1746 1747 // Xor packed integers 1748 void pxor(XMMRegister dst, XMMRegister src); 1749 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256); 1750 void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256); 1751 1752 // Copy low 128bit into high 128bit of YMM registers. 1753 void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src); 1754 void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src); 1755 1756 // AVX instruction which is used to clear upper 128 bits of YMM registers and 1757 // to avoid transaction penalty between AVX and SSE states. There is no 1758 // penalty if legacy SSE instructions are encoded using VEX prefix because 1759 // they always clear upper 128 bits. It should be used before calling 1760 // runtime code and native libraries. 1761 void vzeroupper(); 1762 1763 protected: 1764 // Next instructions require address alignment 16 bytes SSE mode. 1765 // They should be called only from corresponding MacroAssembler instructions. 1766 void andpd(XMMRegister dst, Address src); 1767 void andps(XMMRegister dst, Address src); 1768 void xorpd(XMMRegister dst, Address src); 1769 void xorps(XMMRegister dst, Address src); 1770 1771 }; 1772 1773 1774 // MacroAssembler extends Assembler by frequently used macros. 1775 // 1776 // Instructions for which a 'better' code sequence exists depending 1777 // on arguments should also go in here. 1778 1779 class MacroAssembler: public Assembler { 1780 friend class LIR_Assembler; 1781 friend class Runtime1; // as_Address() 1782 1783 protected: 1784 1785 Address as_Address(AddressLiteral adr); 1786 Address as_Address(ArrayAddress adr); 1787 1788 // Support for VM calls 1789 // 1790 // This is the base routine called by the different versions of call_VM_leaf. The interpreter 1791 // may customize this version by overriding it for its purposes (e.g., to save/restore 1792 // additional registers when doing a VM call). 1793 #ifdef CC_INTERP 1794 // c++ interpreter never wants to use interp_masm version of call_VM 1795 #define VIRTUAL 1796 #else 1797 #define VIRTUAL virtual 1798 #endif 1799 1800 VIRTUAL void call_VM_leaf_base( 1801 address entry_point, // the entry point 1802 int number_of_arguments // the number of arguments to pop after the call 1803 ); 1804 1805 // This is the base routine called by the different versions of call_VM. The interpreter 1806 // may customize this version by overriding it for its purposes (e.g., to save/restore 1807 // additional registers when doing a VM call). 1808 // 1809 // If no java_thread register is specified (noreg) than rdi will be used instead. call_VM_base 1810 // returns the register which contains the thread upon return. If a thread register has been 1811 // specified, the return value will correspond to that register. If no last_java_sp is specified 1812 // (noreg) than rsp will be used instead. 1813 VIRTUAL void call_VM_base( // returns the register containing the thread upon return 1814 Register oop_result, // where an oop-result ends up if any; use noreg otherwise 1815 Register java_thread, // the thread if computed before ; use noreg otherwise 1816 Register last_java_sp, // to set up last_Java_frame in stubs; use noreg otherwise 1817 address entry_point, // the entry point 1818 int number_of_arguments, // the number of arguments (w/o thread) to pop after the call 1819 bool check_exceptions // whether to check for pending exceptions after return 1820 ); 1821 1822 // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. 1823 // The implementation is only non-empty for the InterpreterMacroAssembler, 1824 // as only the interpreter handles PopFrame and ForceEarlyReturn requests. 1825 virtual void check_and_handle_popframe(Register java_thread); 1826 virtual void check_and_handle_earlyret(Register java_thread); 1827 1828 void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true); 1829 1830 // helpers for FPU flag access 1831 // tmp is a temporary register, if none is available use noreg 1832 void save_rax (Register tmp); 1833 void restore_rax(Register tmp); 1834 1835 public: 1836 MacroAssembler(CodeBuffer* code) : Assembler(code) {} 1837 1838 // Support for NULL-checks 1839 // 1840 // Generates code that causes a NULL OS exception if the content of reg is NULL. 1841 // If the accessed location is M[reg + offset] and the offset is known, provide the 1842 // offset. No explicit code generation is needed if the offset is within a certain 1843 // range (0 <= offset <= page_size). 1844 1845 void null_check(Register reg, int offset = -1); 1846 static bool needs_explicit_null_check(intptr_t offset); 1847 1848 // Required platform-specific helpers for Label::patch_instructions. 1849 // They _shadow_ the declarations in AbstractAssembler, which are undefined. 1850 void pd_patch_instruction(address branch, address target); 1851 #ifndef PRODUCT 1852 static void pd_print_patched_instruction(address branch); 1853 #endif 1854 1855 // The following 4 methods return the offset of the appropriate move instruction 1856 1857 // Support for fast byte/short loading with zero extension (depending on particular CPU) 1858 int load_unsigned_byte(Register dst, Address src); 1859 int load_unsigned_short(Register dst, Address src); 1860 1861 // Support for fast byte/short loading with sign extension (depending on particular CPU) 1862 int load_signed_byte(Register dst, Address src); 1863 int load_signed_short(Register dst, Address src); 1864 1865 // Support for sign-extension (hi:lo = extend_sign(lo)) 1866 void extend_sign(Register hi, Register lo); 1867 1868 // Load and store values by size and signed-ness 1869 void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2 = noreg); 1870 void store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2 = noreg); 1871 1872 // Support for inc/dec with optimal instruction selection depending on value 1873 1874 void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; } 1875 void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; } 1876 1877 void decrementl(Address dst, int value = 1); 1878 void decrementl(Register reg, int value = 1); 1879 1880 void decrementq(Register reg, int value = 1); 1881 void decrementq(Address dst, int value = 1); 1882 1883 void incrementl(Address dst, int value = 1); 1884 void incrementl(Register reg, int value = 1); 1885 1886 void incrementq(Register reg, int value = 1); 1887 void incrementq(Address dst, int value = 1); 1888 1889 1890 // Support optimal SSE move instructions. 1891 void movflt(XMMRegister dst, XMMRegister src) { 1892 if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; } 1893 else { movss (dst, src); return; } 1894 } 1895 void movflt(XMMRegister dst, Address src) { movss(dst, src); } 1896 void movflt(XMMRegister dst, AddressLiteral src); 1897 void movflt(Address dst, XMMRegister src) { movss(dst, src); } 1898 1899 void movdbl(XMMRegister dst, XMMRegister src) { 1900 if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; } 1901 else { movsd (dst, src); return; } 1902 } 1903 1904 void movdbl(XMMRegister dst, AddressLiteral src); 1905 1906 void movdbl(XMMRegister dst, Address src) { 1907 if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; } 1908 else { movlpd(dst, src); return; } 1909 } 1910 void movdbl(Address dst, XMMRegister src) { movsd(dst, src); } 1911 1912 void incrementl(AddressLiteral dst); 1913 void incrementl(ArrayAddress dst); 1914 1915 // Alignment 1916 void align(int modulus); 1917 1918 // A 5 byte nop that is safe for patching (see patch_verified_entry) 1919 void fat_nop(); 1920 1921 // Stack frame creation/removal 1922 void enter(); 1923 void leave(); 1924 1925 // Support for getting the JavaThread pointer (i.e.; a reference to thread-local information) 1926 // The pointer will be loaded into the thread register. 1927 void get_thread(Register thread); 1928 1929 1930 // Support for VM calls 1931 // 1932 // It is imperative that all calls into the VM are handled via the call_VM macros. 1933 // They make sure that the stack linkage is setup correctly. call_VM's correspond 1934 // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points. 1935 1936 1937 void call_VM(Register oop_result, 1938 address entry_point, 1939 bool check_exceptions = true); 1940 void call_VM(Register oop_result, 1941 address entry_point, 1942 Register arg_1, 1943 bool check_exceptions = true); 1944 void call_VM(Register oop_result, 1945 address entry_point, 1946 Register arg_1, Register arg_2, 1947 bool check_exceptions = true); 1948 void call_VM(Register oop_result, 1949 address entry_point, 1950 Register arg_1, Register arg_2, Register arg_3, 1951 bool check_exceptions = true); 1952 1953 // Overloadings with last_Java_sp 1954 void call_VM(Register oop_result, 1955 Register last_java_sp, 1956 address entry_point, 1957 int number_of_arguments = 0, 1958 bool check_exceptions = true); 1959 void call_VM(Register oop_result, 1960 Register last_java_sp, 1961 address entry_point, 1962 Register arg_1, bool 1963 check_exceptions = true); 1964 void call_VM(Register oop_result, 1965 Register last_java_sp, 1966 address entry_point, 1967 Register arg_1, Register arg_2, 1968 bool check_exceptions = true); 1969 void call_VM(Register oop_result, 1970 Register last_java_sp, 1971 address entry_point, 1972 Register arg_1, Register arg_2, Register arg_3, 1973 bool check_exceptions = true); 1974 1975 // These always tightly bind to MacroAssembler::call_VM_base 1976 // bypassing the virtual implementation 1977 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); 1978 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); 1979 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); 1980 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true); 1981 void super_call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4, bool check_exceptions = true); 1982 1983 void call_VM_leaf(address entry_point, 1984 int number_of_arguments = 0); 1985 void call_VM_leaf(address entry_point, 1986 Register arg_1); 1987 void call_VM_leaf(address entry_point, 1988 Register arg_1, Register arg_2); 1989 void call_VM_leaf(address entry_point, 1990 Register arg_1, Register arg_2, Register arg_3); 1991 1992 // These always tightly bind to MacroAssembler::call_VM_leaf_base 1993 // bypassing the virtual implementation 1994 void super_call_VM_leaf(address entry_point); 1995 void super_call_VM_leaf(address entry_point, Register arg_1); 1996 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2); 1997 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3); 1998 void super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3, Register arg_4); 1999 2000 // last Java Frame (fills frame anchor) 2001 void set_last_Java_frame(Register thread, 2002 Register last_java_sp, 2003 Register last_java_fp, 2004 address last_java_pc); 2005 2006 // thread in the default location (r15_thread on 64bit) 2007 void set_last_Java_frame(Register last_java_sp, 2008 Register last_java_fp, 2009 address last_java_pc); 2010 2011 void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc); 2012 2013 // thread in the default location (r15_thread on 64bit) 2014 void reset_last_Java_frame(bool clear_fp, bool clear_pc); 2015 2016 // Stores 2017 void store_check(Register obj); // store check for obj - register is destroyed afterwards 2018 void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed) 2019 2020 #ifndef SERIALGC 2021 2022 void g1_write_barrier_pre(Register obj, 2023 Register pre_val, 2024 Register thread, 2025 Register tmp, 2026 bool tosca_live, 2027 bool expand_call); 2028 2029 void g1_write_barrier_post(Register store_addr, 2030 Register new_val, 2031 Register thread, 2032 Register tmp, 2033 Register tmp2); 2034 2035 #endif // SERIALGC 2036 2037 // split store_check(Register obj) to enhance instruction interleaving 2038 void store_check_part_1(Register obj); 2039 void store_check_part_2(Register obj); 2040 2041 // C 'boolean' to Java boolean: x == 0 ? 0 : 1 2042 void c2bool(Register x); 2043 2044 // C++ bool manipulation 2045 2046 void movbool(Register dst, Address src); 2047 void movbool(Address dst, bool boolconst); 2048 void movbool(Address dst, Register src); 2049 void testbool(Register dst); 2050 2051 // oop manipulations 2052 void load_klass(Register dst, Register src); 2053 void store_klass(Register dst, Register src); 2054 2055 void load_heap_oop(Register dst, Address src); 2056 void load_heap_oop_not_null(Register dst, Address src); 2057 void store_heap_oop(Address dst, Register src); 2058 2059 // Used for storing NULL. All other oop constants should be 2060 // stored using routines that take a jobject. 2061 void store_heap_oop_null(Address dst); 2062 2063 void load_prototype_header(Register dst, Register src); 2064 2065 #ifdef _LP64 2066 void store_klass_gap(Register dst, Register src); 2067 2068 // This dummy is to prevent a call to store_heap_oop from 2069 // converting a zero (like NULL) into a Register by giving 2070 // the compiler two choices it can't resolve 2071 2072 void store_heap_oop(Address dst, void* dummy); 2073 2074 void encode_heap_oop(Register r); 2075 void decode_heap_oop(Register r); 2076 void encode_heap_oop_not_null(Register r); 2077 void decode_heap_oop_not_null(Register r); 2078 void encode_heap_oop_not_null(Register dst, Register src); 2079 void decode_heap_oop_not_null(Register dst, Register src); 2080 2081 void set_narrow_oop(Register dst, jobject obj); 2082 void set_narrow_oop(Address dst, jobject obj); 2083 void cmp_narrow_oop(Register dst, jobject obj); 2084 void cmp_narrow_oop(Address dst, jobject obj); 2085 2086 // if heap base register is used - reinit it with the correct value 2087 void reinit_heapbase(); 2088 2089 DEBUG_ONLY(void verify_heapbase(const char* msg);) 2090 2091 #endif // _LP64 2092 2093 // Int division/remainder for Java 2094 // (as idivl, but checks for special case as described in JVM spec.) 2095 // returns idivl instruction offset for implicit exception handling 2096 int corrected_idivl(Register reg); 2097 2098 // Long division/remainder for Java 2099 // (as idivq, but checks for special case as described in JVM spec.) 2100 // returns idivq instruction offset for implicit exception handling 2101 int corrected_idivq(Register reg); 2102 2103 void int3(); 2104 2105 // Long operation macros for a 32bit cpu 2106 // Long negation for Java 2107 void lneg(Register hi, Register lo); 2108 2109 // Long multiplication for Java 2110 // (destroys contents of eax, ebx, ecx and edx) 2111 void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y 2112 2113 // Long shifts for Java 2114 // (semantics as described in JVM spec.) 2115 void lshl(Register hi, Register lo); // hi:lo << (rcx & 0x3f) 2116 void lshr(Register hi, Register lo, bool sign_extension = false); // hi:lo >> (rcx & 0x3f) 2117 2118 // Long compare for Java 2119 // (semantics as described in JVM spec.) 2120 void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y) 2121 2122 2123 // misc 2124 2125 // Sign extension 2126 void sign_extend_short(Register reg); 2127 void sign_extend_byte(Register reg); 2128 2129 // Division by power of 2, rounding towards 0 2130 void division_with_shift(Register reg, int shift_value); 2131 2132 // Compares the top-most stack entries on the FPU stack and sets the eflags as follows: 2133 // 2134 // CF (corresponds to C0) if x < y 2135 // PF (corresponds to C2) if unordered 2136 // ZF (corresponds to C3) if x = y 2137 // 2138 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). 2139 // tmp is a temporary register, if none is available use noreg (only matters for non-P6 code) 2140 void fcmp(Register tmp); 2141 // Variant of the above which allows y to be further down the stack 2142 // and which only pops x and y if specified. If pop_right is 2143 // specified then pop_left must also be specified. 2144 void fcmp(Register tmp, int index, bool pop_left, bool pop_right); 2145 2146 // Floating-point comparison for Java 2147 // Compares the top-most stack entries on the FPU stack and stores the result in dst. 2148 // The arguments are in reversed order on the stack (i.e., top of stack is first argument). 2149 // (semantics as described in JVM spec.) 2150 void fcmp2int(Register dst, bool unordered_is_less); 2151 // Variant of the above which allows y to be further down the stack 2152 // and which only pops x and y if specified. If pop_right is 2153 // specified then pop_left must also be specified. 2154 void fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right); 2155 2156 // Floating-point remainder for Java (ST0 = ST0 fremr ST1, ST1 is empty afterwards) 2157 // tmp is a temporary register, if none is available use noreg 2158 void fremr(Register tmp); 2159 2160 2161 // same as fcmp2int, but using SSE2 2162 void cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); 2163 void cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less); 2164 2165 // Inlined sin/cos generator for Java; must not use CPU instruction 2166 // directly on Intel as it does not have high enough precision 2167 // outside of the range [-pi/4, pi/4]. Extra argument indicate the 2168 // number of FPU stack slots in use; all but the topmost will 2169 // require saving if a slow case is necessary. Assumes argument is 2170 // on FP TOS; result is on FP TOS. No cpu registers are changed by 2171 // this code. 2172 void trigfunc(char trig, int num_fpu_regs_in_use = 1); 2173 2174 // branch to L if FPU flag C2 is set/not set 2175 // tmp is a temporary register, if none is available use noreg 2176 void jC2 (Register tmp, Label& L); 2177 void jnC2(Register tmp, Label& L); 2178 2179 // Pop ST (ffree & fincstp combined) 2180 void fpop(); 2181 2182 // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack 2183 void push_fTOS(); 2184 2185 // pops double TOS element from CPU stack and pushes on FPU stack 2186 void pop_fTOS(); 2187 2188 void empty_FPU_stack(); 2189 2190 void push_IU_state(); 2191 void pop_IU_state(); 2192 2193 void push_FPU_state(); 2194 void pop_FPU_state(); 2195 2196 void push_CPU_state(); 2197 void pop_CPU_state(); 2198 2199 // Round up to a power of two 2200 void round_to(Register reg, int modulus); 2201 2202 // Callee saved registers handling 2203 void push_callee_saved_registers(); 2204 void pop_callee_saved_registers(); 2205 2206 // allocation 2207 void eden_allocate( 2208 Register obj, // result: pointer to object after successful allocation 2209 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 2210 int con_size_in_bytes, // object size in bytes if known at compile time 2211 Register t1, // temp register 2212 Label& slow_case // continuation point if fast allocation fails 2213 ); 2214 void tlab_allocate( 2215 Register obj, // result: pointer to object after successful allocation 2216 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise 2217 int con_size_in_bytes, // object size in bytes if known at compile time 2218 Register t1, // temp register 2219 Register t2, // temp register 2220 Label& slow_case // continuation point if fast allocation fails 2221 ); 2222 Register tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case); // returns TLS address 2223 void incr_allocated_bytes(Register thread, 2224 Register var_size_in_bytes, int con_size_in_bytes, 2225 Register t1 = noreg); 2226 2227 // interface method calling 2228 void lookup_interface_method(Register recv_klass, 2229 Register intf_klass, 2230 RegisterOrConstant itable_index, 2231 Register method_result, 2232 Register scan_temp, 2233 Label& no_such_interface); 2234 2235 // Test sub_klass against super_klass, with fast and slow paths. 2236 2237 // The fast path produces a tri-state answer: yes / no / maybe-slow. 2238 // One of the three labels can be NULL, meaning take the fall-through. 2239 // If super_check_offset is -1, the value is loaded up from super_klass. 2240 // No registers are killed, except temp_reg. 2241 void check_klass_subtype_fast_path(Register sub_klass, 2242 Register super_klass, 2243 Register temp_reg, 2244 Label* L_success, 2245 Label* L_failure, 2246 Label* L_slow_path, 2247 RegisterOrConstant super_check_offset = RegisterOrConstant(-1)); 2248 2249 // The rest of the type check; must be wired to a corresponding fast path. 2250 // It does not repeat the fast path logic, so don't use it standalone. 2251 // The temp_reg and temp2_reg can be noreg, if no temps are available. 2252 // Updates the sub's secondary super cache as necessary. 2253 // If set_cond_codes, condition codes will be Z on success, NZ on failure. 2254 void check_klass_subtype_slow_path(Register sub_klass, 2255 Register super_klass, 2256 Register temp_reg, 2257 Register temp2_reg, 2258 Label* L_success, 2259 Label* L_failure, 2260 bool set_cond_codes = false); 2261 2262 // Simplified, combined version, good for typical uses. 2263 // Falls through on failure. 2264 void check_klass_subtype(Register sub_klass, 2265 Register super_klass, 2266 Register temp_reg, 2267 Label& L_success); 2268 2269 // method handles (JSR 292) 2270 void check_method_handle_type(Register mtype_reg, Register mh_reg, 2271 Register temp_reg, 2272 Label& wrong_method_type); 2273 void load_method_handle_vmslots(Register vmslots_reg, Register mh_reg, 2274 Register temp_reg); 2275 void jump_to_method_handle_entry(Register mh_reg, Register temp_reg); 2276 Address argument_address(RegisterOrConstant arg_slot, int extra_slot_offset = 0); 2277 2278 2279 //---- 2280 void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0 2281 2282 // Debugging 2283 2284 // only if +VerifyOops 2285 void verify_oop(Register reg, const char* s = "broken oop"); 2286 void verify_oop_addr(Address addr, const char * s = "broken oop addr"); 2287 2288 // only if +VerifyFPU 2289 void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); 2290 2291 // prints msg, dumps registers and stops execution 2292 void stop(const char* msg); 2293 2294 // prints msg and continues 2295 void warn(const char* msg); 2296 2297 static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg); 2298 static void debug64(char* msg, int64_t pc, int64_t regs[]); 2299 2300 void os_breakpoint(); 2301 2302 void untested() { stop("untested"); } 2303 2304 void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); } 2305 2306 void should_not_reach_here() { stop("should not reach here"); } 2307 2308 void print_CPU_state(); 2309 2310 // Stack overflow checking 2311 void bang_stack_with_offset(int offset) { 2312 // stack grows down, caller passes positive offset 2313 assert(offset > 0, "must bang with negative offset"); 2314 movl(Address(rsp, (-offset)), rax); 2315 } 2316 2317 // Writes to stack successive pages until offset reached to check for 2318 // stack overflow + shadow pages. Also, clobbers tmp 2319 void bang_stack_size(Register size, Register tmp); 2320 2321 virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, 2322 Register tmp, 2323 int offset); 2324 2325 // Support for serializing memory accesses between threads 2326 void serialize_memory(Register thread, Register tmp); 2327 2328 void verify_tlab(); 2329 2330 // Biased locking support 2331 // lock_reg and obj_reg must be loaded up with the appropriate values. 2332 // swap_reg must be rax, and is killed. 2333 // tmp_reg is optional. If it is supplied (i.e., != noreg) it will 2334 // be killed; if not supplied, push/pop will be used internally to 2335 // allocate a temporary (inefficient, avoid if possible). 2336 // Optional slow case is for implementations (interpreter and C1) which branch to 2337 // slow case directly. Leaves condition codes set for C2's Fast_Lock node. 2338 // Returns offset of first potentially-faulting instruction for null 2339 // check info (currently consumed only by C1). If 2340 // swap_reg_contains_mark is true then returns -1 as it is assumed 2341 // the calling code has already passed any potential faults. 2342 int biased_locking_enter(Register lock_reg, Register obj_reg, 2343 Register swap_reg, Register tmp_reg, 2344 bool swap_reg_contains_mark, 2345 Label& done, Label* slow_case = NULL, 2346 BiasedLockingCounters* counters = NULL); 2347 void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done); 2348 2349 2350 Condition negate_condition(Condition cond); 2351 2352 // Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit 2353 // operands. In general the names are modified to avoid hiding the instruction in Assembler 2354 // so that we don't need to implement all the varieties in the Assembler with trivial wrappers 2355 // here in MacroAssembler. The major exception to this rule is call 2356 2357 // Arithmetics 2358 2359 2360 void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; } 2361 void addptr(Address dst, Register src); 2362 2363 void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); } 2364 void addptr(Register dst, int32_t src); 2365 void addptr(Register dst, Register src); 2366 void addptr(Register dst, RegisterOrConstant src) { 2367 if (src.is_constant()) addptr(dst, (int) src.as_constant()); 2368 else addptr(dst, src.as_register()); 2369 } 2370 2371 void andptr(Register dst, int32_t src); 2372 void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; } 2373 2374 void cmp8(AddressLiteral src1, int imm); 2375 2376 // renamed to drag out the casting of address to int32_t/intptr_t 2377 void cmp32(Register src1, int32_t imm); 2378 2379 void cmp32(AddressLiteral src1, int32_t imm); 2380 // compare reg - mem, or reg - &mem 2381 void cmp32(Register src1, AddressLiteral src2); 2382 2383 void cmp32(Register src1, Address src2); 2384 2385 #ifndef _LP64 2386 void cmpoop(Address dst, jobject obj); 2387 void cmpoop(Register dst, jobject obj); 2388 #endif // _LP64 2389 2390 // NOTE src2 must be the lval. This is NOT an mem-mem compare 2391 void cmpptr(Address src1, AddressLiteral src2); 2392 2393 void cmpptr(Register src1, AddressLiteral src2); 2394 2395 void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 2396 void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 2397 // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 2398 2399 void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 2400 void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; } 2401 2402 // cmp64 to avoild hiding cmpq 2403 void cmp64(Register src1, AddressLiteral src); 2404 2405 void cmpxchgptr(Register reg, Address adr); 2406 2407 void locked_cmpxchgptr(Register reg, AddressLiteral adr); 2408 2409 2410 void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); } 2411 2412 2413 void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); } 2414 2415 void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); } 2416 2417 void shlptr(Register dst, int32_t shift); 2418 void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); } 2419 2420 void shrptr(Register dst, int32_t shift); 2421 void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); } 2422 2423 void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); } 2424 void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); } 2425 2426 void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } 2427 2428 void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); } 2429 void subptr(Register dst, int32_t src); 2430 // Force generation of a 4 byte immediate value even if it fits into 8bit 2431 void subptr_imm32(Register dst, int32_t src); 2432 void subptr(Register dst, Register src); 2433 void subptr(Register dst, RegisterOrConstant src) { 2434 if (src.is_constant()) subptr(dst, (int) src.as_constant()); 2435 else subptr(dst, src.as_register()); 2436 } 2437 2438 void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } 2439 void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); } 2440 2441 void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } 2442 void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; } 2443 2444 void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; } 2445 2446 2447 2448 // Helper functions for statistics gathering. 2449 // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes. 2450 void cond_inc32(Condition cond, AddressLiteral counter_addr); 2451 // Unconditional atomic increment. 2452 void atomic_incl(AddressLiteral counter_addr); 2453 2454 void lea(Register dst, AddressLiteral adr); 2455 void lea(Address dst, AddressLiteral adr); 2456 void lea(Register dst, Address adr) { Assembler::lea(dst, adr); } 2457 2458 void leal32(Register dst, Address src) { leal(dst, src); } 2459 2460 // Import other testl() methods from the parent class or else 2461 // they will be hidden by the following overriding declaration. 2462 using Assembler::testl; 2463 void testl(Register dst, AddressLiteral src); 2464 2465 void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 2466 void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 2467 void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); } 2468 2469 void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); } 2470 void testptr(Register src1, Register src2); 2471 2472 void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } 2473 void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); } 2474 2475 // Calls 2476 2477 void call(Label& L, relocInfo::relocType rtype); 2478 void call(Register entry); 2479 2480 // NOTE: this call tranfers to the effective address of entry NOT 2481 // the address contained by entry. This is because this is more natural 2482 // for jumps/calls. 2483 void call(AddressLiteral entry); 2484 2485 // Jumps 2486 2487 // NOTE: these jumps tranfer to the effective address of dst NOT 2488 // the address contained by dst. This is because this is more natural 2489 // for jumps/calls. 2490 void jump(AddressLiteral dst); 2491 void jump_cc(Condition cc, AddressLiteral dst); 2492 2493 // 32bit can do a case table jump in one instruction but we no longer allow the base 2494 // to be installed in the Address class. This jump will tranfers to the address 2495 // contained in the location described by entry (not the address of entry) 2496 void jump(ArrayAddress entry); 2497 2498 // Floating 2499 2500 void andpd(XMMRegister dst, Address src) { Assembler::andpd(dst, src); } 2501 void andpd(XMMRegister dst, AddressLiteral src); 2502 2503 void andps(XMMRegister dst, XMMRegister src) { Assembler::andps(dst, src); } 2504 void andps(XMMRegister dst, Address src) { Assembler::andps(dst, src); } 2505 void andps(XMMRegister dst, AddressLiteral src); 2506 2507 void comiss(XMMRegister dst, XMMRegister src) { Assembler::comiss(dst, src); } 2508 void comiss(XMMRegister dst, Address src) { Assembler::comiss(dst, src); } 2509 void comiss(XMMRegister dst, AddressLiteral src); 2510 2511 void comisd(XMMRegister dst, XMMRegister src) { Assembler::comisd(dst, src); } 2512 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } 2513 void comisd(XMMRegister dst, AddressLiteral src); 2514 2515 void fadd_s(Address src) { Assembler::fadd_s(src); } 2516 void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } 2517 2518 void fldcw(Address src) { Assembler::fldcw(src); } 2519 void fldcw(AddressLiteral src); 2520 2521 void fld_s(int index) { Assembler::fld_s(index); } 2522 void fld_s(Address src) { Assembler::fld_s(src); } 2523 void fld_s(AddressLiteral src); 2524 2525 void fld_d(Address src) { Assembler::fld_d(src); } 2526 void fld_d(AddressLiteral src); 2527 2528 void fld_x(Address src) { Assembler::fld_x(src); } 2529 void fld_x(AddressLiteral src); 2530 2531 void fmul_s(Address src) { Assembler::fmul_s(src); } 2532 void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } 2533 2534 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } 2535 void ldmxcsr(AddressLiteral src); 2536 2537 // compute pow(x,y) and exp(x) with x86 instructions. Don't cover 2538 // all corner cases and may result in NaN and require fallback to a 2539 // runtime call. 2540 void fast_pow(); 2541 void fast_exp(); 2542 void increase_precision(); 2543 void restore_precision(); 2544 2545 // computes exp(x). Fallback to runtime call included. 2546 void exp_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(true, num_fpu_regs_in_use); } 2547 // computes pow(x,y). Fallback to runtime call included. 2548 void pow_with_fallback(int num_fpu_regs_in_use) { pow_or_exp(false, num_fpu_regs_in_use); } 2549 2550 private: 2551 2552 // call runtime as a fallback for trig functions and pow/exp. 2553 void fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use); 2554 2555 // computes 2^(Ylog2X); Ylog2X in ST(0) 2556 void pow_exp_core_encoding(); 2557 2558 // computes pow(x,y) or exp(x). Fallback to runtime call included. 2559 void pow_or_exp(bool is_exp, int num_fpu_regs_in_use); 2560 2561 // these are private because users should be doing movflt/movdbl 2562 2563 void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); } 2564 void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); } 2565 void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); } 2566 void movss(XMMRegister dst, AddressLiteral src); 2567 2568 void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); } 2569 void movlpd(XMMRegister dst, AddressLiteral src); 2570 2571 public: 2572 2573 void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); } 2574 void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } 2575 void addsd(XMMRegister dst, AddressLiteral src); 2576 2577 void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } 2578 void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } 2579 void addss(XMMRegister dst, AddressLiteral src); 2580 2581 void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } 2582 void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } 2583 void divsd(XMMRegister dst, AddressLiteral src); 2584 2585 void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } 2586 void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } 2587 void divss(XMMRegister dst, AddressLiteral src); 2588 2589 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 2590 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 2591 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 2592 void movsd(XMMRegister dst, AddressLiteral src); 2593 2594 void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } 2595 void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } 2596 void mulsd(XMMRegister dst, AddressLiteral src); 2597 2598 void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } 2599 void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } 2600 void mulss(XMMRegister dst, AddressLiteral src); 2601 2602 void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); } 2603 void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); } 2604 void sqrtsd(XMMRegister dst, AddressLiteral src); 2605 2606 void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 2607 void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 2608 void sqrtss(XMMRegister dst, AddressLiteral src); 2609 2610 void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 2611 void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 2612 void subsd(XMMRegister dst, AddressLiteral src); 2613 2614 void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 2615 void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 2616 void subss(XMMRegister dst, AddressLiteral src); 2617 2618 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } 2619 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); } 2620 void ucomiss(XMMRegister dst, AddressLiteral src); 2621 2622 void ucomisd(XMMRegister dst, XMMRegister src) { Assembler::ucomisd(dst, src); } 2623 void ucomisd(XMMRegister dst, Address src) { Assembler::ucomisd(dst, src); } 2624 void ucomisd(XMMRegister dst, AddressLiteral src); 2625 2626 // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values 2627 void xorpd(XMMRegister dst, XMMRegister src) { Assembler::xorpd(dst, src); } 2628 void xorpd(XMMRegister dst, Address src) { Assembler::xorpd(dst, src); } 2629 void xorpd(XMMRegister dst, AddressLiteral src); 2630 2631 // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values 2632 void xorps(XMMRegister dst, XMMRegister src) { Assembler::xorps(dst, src); } 2633 void xorps(XMMRegister dst, Address src) { Assembler::xorps(dst, src); } 2634 void xorps(XMMRegister dst, AddressLiteral src); 2635 2636 // AVX 3-operands instructions 2637 2638 void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddsd(dst, nds, src); } 2639 void vaddsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddsd(dst, nds, src); } 2640 void vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 2641 2642 void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vaddss(dst, nds, src); } 2643 void vaddss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vaddss(dst, nds, src); } 2644 void vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 2645 2646 void vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); } 2647 void vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandpd(dst, nds, src, vector256); } 2648 void vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); 2649 2650 void vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); } 2651 void vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vandps(dst, nds, src, vector256); } 2652 void vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); 2653 2654 void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivsd(dst, nds, src); } 2655 void vdivsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivsd(dst, nds, src); } 2656 void vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 2657 2658 void vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vdivss(dst, nds, src); } 2659 void vdivss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vdivss(dst, nds, src); } 2660 void vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 2661 2662 void vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulsd(dst, nds, src); } 2663 void vmulsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulsd(dst, nds, src); } 2664 void vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 2665 2666 void vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vmulss(dst, nds, src); } 2667 void vmulss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vmulss(dst, nds, src); } 2668 void vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 2669 2670 void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubsd(dst, nds, src); } 2671 void vsubsd(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubsd(dst, nds, src); } 2672 void vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src); 2673 2674 void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) { Assembler::vsubss(dst, nds, src); } 2675 void vsubss(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vsubss(dst, nds, src); } 2676 void vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src); 2677 2678 // AVX Vector instructions 2679 2680 void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); } 2681 void vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorpd(dst, nds, src, vector256); } 2682 void vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); 2683 2684 void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); } 2685 void vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { Assembler::vxorps(dst, nds, src, vector256); } 2686 void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256); 2687 2688 void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) { 2689 if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2 2690 Assembler::vpxor(dst, nds, src, vector256); 2691 else 2692 Assembler::vxorpd(dst, nds, src, vector256); 2693 } 2694 void vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) { 2695 if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2 2696 Assembler::vpxor(dst, nds, src, vector256); 2697 else 2698 Assembler::vxorpd(dst, nds, src, vector256); 2699 } 2700 2701 // Move packed integer values from low 128 bit to hign 128 bit in 256 bit vector. 2702 void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) { 2703 if (UseAVX > 1) // vinserti128h is available only in AVX2 2704 Assembler::vinserti128h(dst, nds, src); 2705 else 2706 Assembler::vinsertf128h(dst, nds, src); 2707 } 2708 2709 // Data 2710 2711 void cmov32( Condition cc, Register dst, Address src); 2712 void cmov32( Condition cc, Register dst, Register src); 2713 2714 void cmov( Condition cc, Register dst, Register src) { cmovptr(cc, dst, src); } 2715 2716 void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 2717 void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmov32(cc, dst, src)); } 2718 2719 void movoop(Register dst, jobject obj); 2720 void movoop(Address dst, jobject obj); 2721 2722 void movptr(ArrayAddress dst, Register src); 2723 // can this do an lea? 2724 void movptr(Register dst, ArrayAddress src); 2725 2726 void movptr(Register dst, Address src); 2727 2728 void movptr(Register dst, AddressLiteral src); 2729 2730 void movptr(Register dst, intptr_t src); 2731 void movptr(Register dst, Register src); 2732 void movptr(Address dst, intptr_t src); 2733 2734 void movptr(Address dst, Register src); 2735 2736 void movptr(Register dst, RegisterOrConstant src) { 2737 if (src.is_constant()) movptr(dst, src.as_constant()); 2738 else movptr(dst, src.as_register()); 2739 } 2740 2741 #ifdef _LP64 2742 // Generally the next two are only used for moving NULL 2743 // Although there are situations in initializing the mark word where 2744 // they could be used. They are dangerous. 2745 2746 // They only exist on LP64 so that int32_t and intptr_t are not the same 2747 // and we have ambiguous declarations. 2748 2749 void movptr(Address dst, int32_t imm32); 2750 void movptr(Register dst, int32_t imm32); 2751 #endif // _LP64 2752 2753 // to avoid hiding movl 2754 void mov32(AddressLiteral dst, Register src); 2755 void mov32(Register dst, AddressLiteral src); 2756 2757 // to avoid hiding movb 2758 void movbyte(ArrayAddress dst, int src); 2759 2760 // Import other mov() methods from the parent class or else 2761 // they will be hidden by the following overriding declaration. 2762 using Assembler::movdl; 2763 using Assembler::movq; 2764 void movdl(XMMRegister dst, AddressLiteral src); 2765 void movq(XMMRegister dst, AddressLiteral src); 2766 2767 // Can push value or effective address 2768 void pushptr(AddressLiteral src); 2769 2770 void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); } 2771 void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); } 2772 2773 void pushoop(jobject obj); 2774 2775 // sign extend as need a l to ptr sized element 2776 void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); } 2777 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } 2778 2779 // C2 compiled method's prolog code. 2780 void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b); 2781 2782 // IndexOf strings. 2783 // Small strings are loaded through stack if they cross page boundary. 2784 void string_indexof(Register str1, Register str2, 2785 Register cnt1, Register cnt2, 2786 int int_cnt2, Register result, 2787 XMMRegister vec, Register tmp); 2788 2789 // IndexOf for constant substrings with size >= 8 elements 2790 // which don't need to be loaded through stack. 2791 void string_indexofC8(Register str1, Register str2, 2792 Register cnt1, Register cnt2, 2793 int int_cnt2, Register result, 2794 XMMRegister vec, Register tmp); 2795 2796 // Smallest code: we don't need to load through stack, 2797 // check string tail. 2798 2799 // Compare strings. 2800 void string_compare(Register str1, Register str2, 2801 Register cnt1, Register cnt2, Register result, 2802 XMMRegister vec1); 2803 2804 // Compare char[] arrays. 2805 void char_arrays_equals(bool is_array_equ, Register ary1, Register ary2, 2806 Register limit, Register result, Register chr, 2807 XMMRegister vec1, XMMRegister vec2); 2808 2809 // Fill primitive arrays 2810 void generate_fill(BasicType t, bool aligned, 2811 Register to, Register value, Register count, 2812 Register rtmp, XMMRegister xtmp); 2813 2814 #undef VIRTUAL 2815 2816 }; 2817 2818 /** 2819 * class SkipIfEqual: 2820 * 2821 * Instantiating this class will result in assembly code being output that will 2822 * jump around any code emitted between the creation of the instance and it's 2823 * automatic destruction at the end of a scope block, depending on the value of 2824 * the flag passed to the constructor, which will be checked at run-time. 2825 */ 2826 class SkipIfEqual { 2827 private: 2828 MacroAssembler* _masm; 2829 Label _label; 2830 2831 public: 2832 SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value); 2833 ~SkipIfEqual(); 2834 }; 2835 2836 #ifdef ASSERT 2837 inline bool AbstractAssembler::pd_check_instruction_mark() { return true; } 2838 #endif 2839 2840 #endif // CPU_X86_VM_ASSEMBLER_X86_HPP