/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_S390_VM_MACROASSEMBLER_S390_HPP
#define CPU_S390_VM_MACROASSEMBLER_S390_HPP

#include "asm/assembler.hpp"

#define MODERN_IFUN(name)  ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_IFUN(name) ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define MODERN_FFUN(name)  ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_FFUN(name) ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)

class MacroAssembler: public Assembler {
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  //
  // Optimized instruction emitters
  //
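
  // A hedged usage sketch (illustrative only): the MODERN_/CLASSIC_ casts defined above the
  // class are meant to hand a matched pair of Assembler emitters to the *_opt helpers declared
  // below, which then pick the long-displacement ("modern") or short-displacement ("classic")
  // encoding. The z_sty/z_st pairing and the '__' shortcut for the MacroAssembler pointer are
  // assumptions borrowed from the .cpp files, not prescribed by this header.
  //   __ reg2mem_opt(Z_R2, disp, Z_R0 /* no index */, Z_SP,
  //                  MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));   // 'disp' is a placeholder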

  // Move register if destination register and target register are different.
  void lr_if_needed(Register rd, Register rs);
  void lgr_if_needed(Register rd, Register rs);
  void llgfr_if_needed(Register rd, Register rs);
  void ldr_if_needed(FloatRegister rd, FloatRegister rs);

  void move_reg_if_needed(Register dest, BasicType dest_type, Register src, BasicType src_type);
  void move_freg_if_needed(FloatRegister dest, BasicType dest_type, FloatRegister src, BasicType src_type);

  void freg2mem_opt(FloatRegister reg,
                    int64_t disp,
                    Register index,
                    Register base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register scratch = Z_R0);
  void freg2mem_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void mem2freg_opt(FloatRegister reg,
                    int64_t disp,
                    Register index,
                    Register base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register scratch = Z_R0);
  void mem2freg_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void reg2mem_opt(Register reg,
                   int64_t disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                   Register scratch = Z_R0);
  // Returns the offset of the store instruction.
  int reg2mem_opt(Register reg, const Address &a, bool is_double = true);

  void mem2reg_opt(Register reg,
                   int64_t disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register));
  void mem2reg_opt(Register reg, const Address &a, bool is_double = true);
  void mem2reg_signed_opt(Register reg, const Address &a);

  // AND immediate and set condition code. Works for 64 bit immediates/operations as well.
  void and_imm(Register r, long mask, Register tmp = Z_R0, bool wide = false);

  // 1's complement, 32bit or 64bit. Optimized to exploit the distinct-operands facility.
  // Note: The condition code is neither preserved nor correctly set by this code!!!
  // Note: (wide == false) does not protect the high order half of the target register
  //       from alteration. It only serves as an optimization hint for 32-bit results.
  void not_(Register r1, Register r2 = noreg, bool wide = false);  // r1 = ~r2

  // Expanded support of all "rotate_then_<logicalOP>" instructions.
  //
  // Generalize and centralize rotate_then_<logicalOP> emitter.
  // Functional description. For details, see Principles of Operation, Chapter 7, "Rotate Then Insert...".
  //  - Bits in a register are numbered left (most significant) to right (least significant), i.e. [0..63].
  //  - Bytes in a register are numbered left (most significant) to right (least significant), i.e. [0..7].
  //  - Register src is rotated to the left by (nRotate & 0x3f) positions.
  //  - Negative values for nRotate result in a rotation to the right by abs(nRotate) positions.
  //  - The bits in positions [lBitPos..rBitPos] of the _ROTATED_ src operand take part in the
  //    logical operation performed on the contents (in those positions) of the dst operand.
  //  - The logical operation that is performed on the dst operand is one of
  //     o insert the selected bits (replacing the original contents of those bit positions)
  //     o and the selected bits with the corresponding bits of the dst operand
  //     o or  the selected bits with the corresponding bits of the dst operand
  //     o xor the selected bits with the corresponding bits of the dst operand
  //  - For clear_dst == true, the destination register is cleared before the bits are inserted.
  //    For clear_dst == false, only the bit positions that get data inserted from src
  //    are changed. All other bit positions remain unchanged.
  //  - For test_only == true, the result of the logicalOP is only used to set the condition code; dst remains unchanged.
  //    For test_only == false, the result of the logicalOP replaces the selected bits of dst.
  //  - src32bit and dst32bit indicate that the respective register is used as a 32bit value only.
  //    This knowledge can simplify code generation. A short usage sketch follows the declarations below.
  //
  // Here is an important performance note, valid for all <logicalOP>s except "insert":
  //   Due to the complex nature of the operation, it cannot be done in a single cycle.
  //   Timing constraints require the instructions to be cracked into two micro-ops, taking
  //   one or two cycles each to execute. In some cases, an additional pipeline bubble might get added.
  //   Macroscopically, that adds up to a three- or four-cycle instruction where you would
  //   expect just a single cycle.
  //   It is thus not beneficial from a performance point of view to exploit those instructions.
  //   Other reasons (code compactness, register pressure, ...) might outweigh this penalty.
  //
  unsigned long create_mask(int lBitPos, int rBitPos);
  void rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                        int nRotate, bool src32bit, bool dst32bit, bool oneBits);
  void rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                          bool clear_dst);
  void rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
  void rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                      bool test_only);
  void rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
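
  // A hedged usage sketch for the rotate_then_* emitters above (illustrative only; register
  // choices are placeholders). With bits numbered 0 (MSB) through 63 (LSB), this isolates the
  // low byte of Z_R2 into a cleared Z_R3, i.e. Z_R3 = Z_R2 & 0xff:
  //   __ rotate_then_insert(Z_R3, Z_R2, 56, 63, 0, true);
  // Rotating first allows arbitrary source field positions, e.g. extracting bits [40..47]
  // into the low byte by rotating right 16 positions:
  //   __ rotate_then_insert(Z_R3, Z_R2, 56, 63, -16, true);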

  void add64(Register r1, RegisterOrConstant inc);

  // Helper function to multiply the 64bit contents of a register by a 16bit constant.
  // The optimization tries to avoid the mghi instruction, since it uses the FPU for
  // calculation and is thus rather slow.
  //
  // There is no handling for special cases, e.g. cval==0 or cval==1.
  //
  // Returns len of generated code block.
  unsigned int mul_reg64_const16(Register rval, Register work, int cval);

  // Generic operation r1 := r2 + imm.
  void add2reg(Register r1, int64_t imm, Register r2 = noreg);
  // Generic operation r := b + x + d.
  void add2reg_with_index(Register r, int64_t d, Register x, Register b = noreg);

  // Add2mem* methods for direct memory increment.
  void add2mem_32(const Address &a, int64_t imm, Register tmp);
  void add2mem_64(const Address &a, int64_t imm, Register tmp);

  // *((int8_t*)(dst)) |= imm8
  inline void or2mem_8(Address& dst, int64_t imm8);

  // Load values by size and signedness.
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes);

  // Load values with large offsets to base address.
 private:
  int split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate);
 public:
  void load_long_largeoffset(Register t, int64_t si20, Register a, Register tmp);
  void load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);
  void load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);

 private:
  long toc_distance();
 public:
  void load_toc(Register Rtoc);
  void load_long_pcrelative(Register Rdst, address dataLocation);
  static int load_long_pcrelative_size() { return 6; }
  void load_addr_pcrelative(Register Rdst, address dataLocation);
  static int load_addr_pcrel_size() { return 6; } // Just a LARL.

  // Load a value from memory and test (set CC).
  void load_and_test_byte    (Register dst, const Address &a);
  void load_and_test_short   (Register dst, const Address &a);
  void load_and_test_int     (Register dst, const Address &a);
  void load_and_test_int2long(Register dst, const Address &a);
  void load_and_test_long    (Register dst, const Address &a);

  // Test a bit in memory. Result is reflected in CC.
  void testbit(const Address &a, unsigned int bit);
  // Test a bit in a register. Result is reflected in CC.
  void testbit(Register r, unsigned int bitPos);

  // Clear a register, i.e. load const zero into reg. Return len (in bytes) of
  // generated instruction(s).
  // whole_reg: Clear 64 bits if true, 32 bits otherwise.
  // set_cc:    Use instruction that sets the condition code, if true.
  int clear_reg(Register r, bool whole_reg = true, bool set_cc = true);

#ifdef ASSERT
  int preset_reg(Register r, unsigned long pattern, int pattern_len);
#endif

  // Clear (store zeros) a small piece of memory.
  // CAUTION: Do not use this for atomic memory clearing. Use store_const() instead.
  // addr: Address descriptor of memory to clear.
  //       Index register will not be used!
  // size: Number of bytes to clear.
  void clear_mem(const Address& addr, unsigned size);

  // Move immediate values to memory. Currently supports 32 and 64 bit stores,
  // but may be extended to 16 bit store operation, if needed.
  // For details, see implementation in *.cpp file.
  int store_const(const Address &dest, long imm,
                  unsigned int lm, unsigned int lc,
                  Register scratch = Z_R0);
  inline int store_const(const Address &dest, long imm,
                         Register scratch = Z_R0, bool is_long = true);

  // Move/initialize arbitrarily large memory area. No check for destructive overlap.
  // Being interruptible, these instructions need a retry-loop.
  void move_long_ext(Register dst, Register src, unsigned int pad);

  void compare_long_ext(Register left, Register right, unsigned int pad);
  void compare_long_uni(Register left, Register right, unsigned int pad);

  void search_string(Register end, Register start);
  void search_string_uni(Register end, Register start);
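
  // A hedged usage sketch for the memory helpers above (illustrative only; 'slot_offset' is a
  // placeholder and the Address constructor form is an assumption): zero a 16-byte stack area,
  // then store an 8-byte immediate into its first slot.
  //   __ clear_mem(Address(Z_SP, slot_offset), 16);                // not atomic
  //   __ store_const(Address(Z_SP, slot_offset), 42, Z_R0, true);  // 8-byte store, Z_R0 as scratch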

  // Translate instructions
  // Being interruptible, these instructions need a retry-loop.
  void translate_oo(Register dst, Register src, uint mask);
  void translate_ot(Register dst, Register src, uint mask);
  void translate_to(Register dst, Register src, uint mask);
  void translate_tt(Register dst, Register src, uint mask);

  // Crypto instructions.
  // Being interruptible, these instructions need a retry-loop.
  void cksm(Register crcBuff, Register srcBuff);
  void km(  Register dstBuff, Register srcBuff);
  void kmc( Register dstBuff, Register srcBuff);
  void kimd(Register srcBuff);
  void klmd(Register srcBuff);
  void kmac(Register srcBuff);

  // nop padding
  void align(int modulus);
  void align_address(int modulus);

  //
  // Constants, loading constants, TOC support
  //
  // Safepoint check factored out.
  void generate_safepoint_check(Label& slow_path, Register scratch = noreg, bool may_relocate = true);

  // Load generic address: d <- base(a) + index(a) + disp(a).
  inline void load_address(Register d, const Address &a);
  // Load absolute address (and try to optimize).
  void load_absolute_address(Register d, address addr);

  // Address of Z_ARG1 and argument_offset.
  // If temp_reg == arg_slot, arg_slot will be overwritten.
  Address argument_address(RegisterOrConstant arg_slot,
                           Register temp_reg = noreg,
                           int64_t extra_slot_offset = 0);

  // Load a narrow ptr constant (oop or klass ptr).
  void load_narrow_oop(  Register t, narrowOop a);
  void load_narrow_klass(Register t, Klass* k);

  static bool is_load_const_32to64(address pos);
  static bool is_load_narrow_oop(address pos)   { return is_load_const_32to64(pos); }
  static bool is_load_narrow_klass(address pos) { return is_load_const_32to64(pos); }

  static int load_const_32to64_size()  { return 6; }
  static int load_narrow_oop_size()    { return load_const_32to64_size(); }
  static int load_narrow_klass_size()  { return load_const_32to64_size(); }

  static int patch_load_const_32to64(address pos, int64_t a);
  static int patch_load_narrow_oop(address pos, oop o);
  static int patch_load_narrow_klass(address pos, Klass* k);

  // cOops. CLFI exploit.
  void compare_immediate_narrow_oop(Register oop1, narrowOop oop2);
  void compare_immediate_narrow_klass(Register op1, Klass* op2);
  static bool is_compare_immediate32(address pos);
  static bool is_compare_immediate_narrow_oop(address pos);
  static bool is_compare_immediate_narrow_klass(address pos);
  static int  compare_immediate_narrow_size()       { return 6; }
  static int  compare_immediate_narrow_oop_size()   { return compare_immediate_narrow_size(); }
  static int  compare_immediate_narrow_klass_size() { return compare_immediate_narrow_size(); }
  static int  patch_compare_immediate_32(address pos, int64_t a);
  static int  patch_compare_immediate_narrow_oop(address pos, oop o);
  static int  patch_compare_immediate_narrow_klass(address pos, Klass* k);

  // Load a 32bit constant into a 64bit register.
  void load_const_32to64(Register t, int64_t x, bool sign_extend = true);
  // Load a 64 bit constant.
  void load_const(Register t, long a);
  inline void load_const(Register t, void* a);
  inline void load_const(Register t, Label& L);
  inline void load_const(Register t, const AddressLiteral& a);
  // Get the 64 bit constant from a `load_const' sequence.
  static long get_const(address load_const);
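
  // A hedged usage sketch (illustrative only): load_const emits a fixed-size, patchable
  // sequence, so the embedded constant can later be read back or replaced.
  //   address loc = __ pc();
  //   __ load_const(Z_R1, 0x1234567812345678L);
  //   assert(MacroAssembler::get_const(loc) == 0x1234567812345678L, "sanity");
  //   // ... later, e.g. when patching: MacroAssembler::patch_const(loc, (long)new_value);
  //   // ('new_value' is a placeholder; patch_const does not flush the icache, see below.)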
  // Patch the 64 bit constant of a `load_const' sequence. This is a low level
  // procedure. It neither flushes the instruction cache nor is it atomic.
  static void patch_const(address load_const, long x);
  static int load_const_size() { return 12; }

  // Turn a char into boolean. NOTE: destroys r.
  void c2bool(Register r, Register t = Z_R0);

  // Optimized version of load_const for constants that do not need to be
  // loaded by a sequence of instructions of fixed length and that do not
  // need to be patched.
  int load_const_optimized_rtn_len(Register t, long x, bool emit);
  inline void load_const_optimized(Register t, long x);
  inline void load_const_optimized(Register t, void* a);
  inline void load_const_optimized(Register t, Label& L);
  inline void load_const_optimized(Register t, const AddressLiteral& a);

 public:

  //----------------------------------------------------------
  //  oops in code                        -------------
  //  including compressed oops support   -------------
  //----------------------------------------------------------

  // Metadata in code that we have to keep track of.
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index

  // allocate_index
  AddressLiteral allocate_oop_address(jobject obj);
  // find_index
  AddressLiteral constant_oop_address(jobject obj);
  // Uses allocate_oop_address.
  inline void set_oop         (jobject obj, Register d);
  // Uses constant_oop_address.
  inline void set_oop_constant(jobject obj, Register d);
  // Uses constant_metadata_address.
  inline bool set_metadata_constant(Metadata* md, Register d);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);
  //
  // branch, jump
  //

  // Use one generic function for all branch patches.
  static unsigned long patched_branch(address dest_pos, unsigned long inst, address inst_pos);

  void pd_patch_instruction(address branch, address target);

  // Extract relative address from "relative" instructions.
  static long get_pcrel_offset(unsigned long inst);
  static long get_pcrel_offset(address pc);
  static address get_target_addr_pcrel(address pc);

  static inline bool is_call_pcrelative_short(unsigned long inst);
  static inline bool is_call_pcrelative_long(unsigned long inst);
  static inline bool is_branch_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative_long(unsigned long inst);
  static inline bool is_compareandbranch_pcrelative_short(unsigned long inst);
  static inline bool is_branchoncount_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex32_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex64_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative16(unsigned long inst);
  static inline bool is_branch_pcrelative32(unsigned long inst);
  static inline bool is_branch_pcrelative(unsigned long inst);
  static inline bool is_load_pcrelative_long(unsigned long inst);
  static inline bool is_misc_pcrelative_long(unsigned long inst);
  static inline bool is_pcrelative_short(unsigned long inst);
  static inline bool is_pcrelative_long(unsigned long inst);
  // PCrelative TOC access. Variants with address argument.
  static inline bool is_load_pcrelative_long(address iLoc);
  static inline bool is_pcrelative_short(address iLoc);
  static inline bool is_pcrelative_long(address iLoc);

  static inline bool is_pcrelative_instruction(address iloc);
  static inline bool is_load_addr_pcrel(address a);

  static void patch_target_addr_pcrel(address pc, address con);
  static void patch_addr_pcrel(address pc, address con) {
    patch_target_addr_pcrel(pc, con); // Just delegate. This is only for nativeInst_s390.cpp.
  }

  //---------------------------------------------------------
  //  Some macros for more comfortable assembler programming.
  //---------------------------------------------------------

  // NOTE: pass NearLabel T to signal that the branch target T will be bound to a near address.

  void compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);

  void branch_optimized(Assembler::branch_condition cond, address branch_target);
  void branch_optimized(Assembler::branch_condition cond, Label& branch_target);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    address  branch_addr,
                                    bool     len64,
                                    bool     has_sign);
  void compare_and_branch_optimized(Register r1,
                                    jlong    x2,
                                    Assembler::branch_condition cond,
                                    Label&   branch_target,
                                    bool     len64,
                                    bool     has_sign);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    Label&   branch_target,
                                    bool     len64,
                                    bool     has_sign);

  //
  // Support for frame handling
  //
  // Specify the register that should be stored as the return pc in the
  // current frame (default is R14).
  inline void save_return_pc(Register pc = Z_R14);
  inline void restore_return_pc();

  // Get current PC.
  address get_PC(Register result);

  // Get current PC + offset. Offset given in bytes, must be even!
  address get_PC(Register result, int64_t offset);

  // Resize current frame, either relative to the current SP or to an absolute address.
  void resize_frame_sub(Register offset, Register fp, bool load_fp = true);
  void resize_frame_absolute(Register addr, Register fp, bool load_fp = true);
  void resize_frame(RegisterOrConstant offset, Register fp, bool load_fp = true);

  // Push a frame of size `bytes'. If copy_sp is false, old_sp must already
  // contain a copy of Z_SP.
  void push_frame(Register bytes, Register old_sp, bool copy_sp = true, bool bytes_with_inverted_sign = false);

  // Push a frame of size `bytes'. No abi space provided.
  // Don't rely on register locking; instead pass a scratch register
  // (Z_R0 by default).
  // CAUTION! Passing registers >= Z_R2 may produce bad results on
  // old CPUs!
  unsigned int push_frame(unsigned int bytes, Register scratch = Z_R0);

  // Push a frame of size `bytes' with abi160 on top.
  unsigned int push_frame_abi160(unsigned int bytes);

  // Pop current C frame.
  void pop_frame();
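
  // A hedged usage sketch of the frame helpers above (illustrative only; 'frame_size' is a
  // placeholder and the z_br emitter name is assumed from the s390 Assembler): a typical
  // prologue/epilogue pair for a stub or runtime fragment.
  //   __ save_return_pc();               // store Z_R14 into the caller's frame
  //   __ push_frame_abi160(frame_size);  // new frame with abi160 header on top
  //   // ... body ...
  //   __ pop_frame();                    // restore Z_SP
  //   __ restore_return_pc();            // reload return pc into Z_R14
  //   __ z_br(Z_R14);                    // return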

  //
  // Calls
  //

 private:
  address _last_calls_return_pc;

 public:
  // Support for VM calls. This is the base routine called by the
  // different versions of call_VM_leaf. The interpreter may customize
  // this version by overriding it for its purposes (e.g., to
  // save/restore additional registers when doing a VM call).
  void call_VM_leaf_base(address entry_point);
  void call_VM_leaf_base(address entry_point, bool allow_relocation);

  // It is imperative that all calls into the VM are handled via the
  // call_VM macros. They make sure that the stack linkage is set up
  // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
  // while call_VM_leaf's correspond to LEAF entry points.
  //
  // This is the base routine called by the different versions of
  // call_VM. The interpreter may customize this version by overriding
  // it for its purposes (e.g., to save/restore additional registers
  // when doing a VM call).

  // If no last_java_sp is specified (noreg), then SP will be used instead.

  virtual void call_VM_base(
    Register oop_result,        // Where an oop-result ends up if any; use noreg otherwise.
    Register last_java_sp,      // To set up last_Java_frame in stubs; use noreg otherwise.
    address  entry_point,       // The entry point.
    bool     check_exception);  // Flag which indicates if exception should be checked.
  virtual void call_VM_base(
    Register oop_result,        // Where an oop-result ends up if any; use noreg otherwise.
    Register last_java_sp,      // To set up last_Java_frame in stubs; use noreg otherwise.
    address  entry_point,       // The entry point.
    bool     allow_relocation,  // Flag to request generation of relocatable code.
    bool     check_exception);  // Flag which indicates if exception should be checked.

  // Call into the VM.
  // Passes the thread pointer (in Z_ARG1) as a prepended argument.
  // Makes sure oop return values are visible to the GC.
  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
               Register arg_3, bool check_exceptions = true);

  void call_VM_static(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                      Register arg_3, bool check_exceptions = true);

  // Overloaded with last_java_sp.
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);

  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
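
  // A hedged usage sketch (illustrative only): a call into the VM runtime from generated code.
  // 'SharedRuntime::some_entry' is a placeholder entry point; the thread pointer is prepended
  // automatically as described above.
  //   __ call_VM(Z_RET, CAST_FROM_FN_PTR(address, SharedRuntime::some_entry), Z_ARG2);
  //   // On return, a potential oop result has been made visible to the GC and sits in Z_RET.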

  // Really static VM leaf call (never patched).
  void call_VM_leaf_static(address entry_point);
  void call_VM_leaf_static(address entry_point, Register arg_1);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3);

  // Call a C function via its function entry. Updates and returns _last_calls_return_pc.
  inline address call(Register function_entry);
  inline address call_c(Register function_entry);
  address call_c(address function_entry);
  // Variant for really static (non-relocatable) calls which are never patched.
  address call_c_static(address function_entry);
  // TOC or pc-relative call + emits a runtime_call relocation.
  address call_c_opt(address function_entry);

  inline address call_stub(Register function_entry);
  inline address call_stub(address  function_entry);

  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
  inline address last_calls_return_pc();

 private:
  static bool is_call_far_patchable_variant0_at(address instruction_addr); // Dynamic TOC: load target addr from CP and call.
  static bool is_call_far_patchable_variant2_at(address instruction_addr); // PC-relative call, prefixed with NOPs.

 public:
  bool call_far_patchable(address target, int64_t toc_offset);
  static bool is_call_far_patchable_at(address inst_start);            // All supported forms of patchable calls.
  static bool is_call_far_patchable_pcrelative_at(address inst_start); // Pc-relative call with leading nops.
  static bool is_call_far_pcrelative(address instruction_addr);        // Pure far pc-relative call, with one leading size adjustment nop.
  static void set_dest_of_call_far_patchable_at(address inst_start, address target, int64_t toc_offset);
  static address get_dest_of_call_far_patchable_at(address inst_start, address toc_start);

  void align_call_far_patchable(address pc);

  // PCrelative TOC access.

  // This value is independent of code position - constant for the lifetime of the VM.
  static int call_far_patchable_size() {
    return load_const_from_toc_size() + call_byregister_size();
  }

  static int call_far_patchable_ret_addr_offset() { return call_far_patchable_size(); }

  static bool call_far_patchable_requires_alignment_nop(address pc) {
    if (!os::is_MP()) return false;
    int size = call_far_patchable_size();
    return ((intptr_t)(pc + size) & 0x03L) != 0;
  }

  // END OF PCrelative TOC access.

  static int jump_byregister_size()         { return 2; }
  static int jump_pcrelative_size()         { return 4; }
  static int jump_far_pcrelative_size()     { return 6; }
  static int call_byregister_size()         { return 2; }
  static int call_pcrelative_size()         { return 4; }
  static int call_far_pcrelative_size()     { return 2 + 6; } // Prepend each BRASL with a nop.
  static int call_far_pcrelative_size_raw() { return 6; }     // Size of the BRASL alone, without the leading nop.

  //
  // Java utilities
  //

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  // Polling page support.
  enum poll_mask {
    mask_stackbang = 0xde, // 222 (dec)
    mask_safepoint = 0x6f, // 111 (dec)
    mask_profiling = 0xba  // 186 (dec)
  };

  // Read from the polling page.
  void load_from_polling_page(Register polling_page_address, int64_t offset = 0);

  // Check if given instruction is a read from the polling page
  // as emitted by load_from_polling_page.
  static bool is_load_from_polling_page(address instr_loc);
  // Extract poll address from instruction and ucontext.
  static address get_poll_address(address instr_loc, void* ucontext);
  // Extract poll register from instruction.
  static uint get_poll_register(address instr_loc);

  // Check if instruction is a write access to the memory serialization page
  // realized by one of the instructions stw, stwu, stwx, or stwux.
  static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);

  // Support for serializing memory accesses between threads.
  void serialize_memory(Register thread, Register tmp1, Register tmp2);

  // Stack overflow checking
  void bang_stack_with_offset(int offset);

  // Atomics
  // -- none?

  void tlab_allocate(Register obj,                // Result: pointer to object after successful allocation.
                     Register var_size_in_bytes,  // Object size in bytes if unknown at compile time; invalid otherwise.
                     int      con_size_in_bytes,  // Object size in bytes if known at compile time.
                     Register t1,                 // Temp register.
                     Label&   slow_case);         // Continuation point if fast allocation fails.

  // Emitter for interface method lookup.
  //   input:  recv_klass, intf_klass, itable_index
  //   output: method_result
  //   kills:  itable_index, temp1_reg, Z_R0, Z_R1
  void lookup_interface_method(Register           recv_klass,
                               Register           intf_klass,
                               RegisterOrConstant itable_index,
                               Register           method_result,
                               Register           temp1_reg,
                               Register           temp2_reg,
                               Label&             no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register           recv_klass,
                             RegisterOrConstant vtable_index,
                             Register           method_result);

  // Factor out code to call ic_miss_handler.
  unsigned int call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch);
  void nmethod_UEP(Label& ic_miss);

  // Emitters for "partial subtype" checks.

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp1_reg.
  // If super_check_offset is not -1, temp1_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Label*   L_success,
                                     Label*   L_failure,
                                     Label*   L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));
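
  // A hedged usage sketch of the fast/slow path pairing (illustrative only; labels and
  // registers are placeholders; the slow-path emitter is declared just below).
  //   Label L_ok, L_fail, L_slow;
  //   __ check_klass_subtype_fast_path(Rsub, Rsuper, Rtmp, &L_ok, &L_fail, &L_slow);
  //   __ bind(L_slow);
  //   __ check_klass_subtype_slow_path(Rsub, Rsuper, Rarray, Rlen, &L_ok, &L_fail);
  //   __ bind(L_fail);   // not a subtype
  //   // ...
  //   __ bind(L_ok);     // subtype confirmed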

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // A temp register can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register Rsubklass,
                                     Register Rsuperklass,
                                     Register Rarray_ptr,  // tmp
                                     Register Rlength,     // tmp
                                     Label*   L_success,
                                     Label*   L_failure);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp1_reg,
                           Register temp2_reg,
                           Label&   L_success);

  // Increment a counter at counter_address when the eq condition code is set.
  // Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
  void increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg);

  // Biased locking support
  // Upon entry, obj_reg must contain the target object, and mark_reg
  // must contain the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
                            Register temp2_reg, Label& done, Label* slow_case = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  // If allow_delay_slot_filling is set to true, the next instruction
  // emitted after this one will go in an annulled delay slot if the
  // biased locking exit case failed.
  void biased_locking_exit(Register mark_addr, Register temp_reg, Label& done);

  void compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);
  void compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);

  // Write to card table for modification at store_addr - register is destroyed afterwards.
  void card_write_barrier_post(Register store_addr, Register tmp);

#if INCLUDE_ALL_GCS
  // General G1 pre-barrier generator.
  // Purpose: record the previous value if it is not null.
  // All non-tmps are preserved.
  void g1_write_barrier_pre(Register           Robj,
                            RegisterOrConstant offset,
                            Register           Rpre_val,       // Ideally, this is a non-volatile register.
                            Register           Rval,           // Will be preserved.
                            Register           Rtmp1,          // If Rpre_val is volatile, either Rtmp1
                            Register           Rtmp2,          // or Rtmp2 has to be non-volatile.
                            bool               pre_val_needed); // Save Rpre_val across runtime call, caller uses it.

  // General G1 post-barrier generator.
  // Purpose: Store cross-region card.
  void g1_write_barrier_post(Register Rstore_addr,
                             Register Rnew_val,
                             Register Rtmp1,
                             Register Rtmp2,
                             Register Rtmp3);
#endif // INCLUDE_ALL_GCS
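
  // A hedged usage sketch (illustrative only; all registers and 'off' are placeholders;
  // store_heap_oop is declared further below): an oop field store wrapped in G1 barriers.
  //   __ g1_write_barrier_pre(Rbase, off, Rpre, Rnew, Rtmp1, Rtmp2, false);
  //   __ store_heap_oop(Rnew, off, Rbase);
  //   __ add2reg(Raddr, off, Rbase);    // address of the updated field
  //   __ g1_write_barrier_post(Raddr, Rnew, Rtmp1, Rtmp2, Rtmp3);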

  // Support for last Java frame (but use call_VM instead where possible).
 private:
  void set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation);
  void reset_last_Java_frame(bool allow_relocation);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation);
 public:
  inline void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  inline void set_last_Java_frame_static(Register last_java_sp, Register last_Java_pc);
  inline void reset_last_Java_frame(void);
  inline void reset_last_Java_frame_static(void);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame_static(Register sp, Register tmp1);

  void set_thread_state(JavaThreadState new_state);

  // Read vm result from thread.
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register result);

  // Vm result is currently getting hijacked for oop preservation.
  void set_vm_result(Register oop_result);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).
  //
  // %%%%%% Currently not done for z/Architecture

  void null_check(Register reg, Register tmp = Z_R0, int64_t offset = -1);
  static bool needs_explicit_null_check(intptr_t offset); // Implemented in shared file ?!

  // Klass oop manipulations if compressed.
  void encode_klass_not_null(Register dst, Register src = noreg);
  void decode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst);
  void load_klass(Register klass, Address mem);
  void load_klass(Register klass, Register src_oop);
  void load_prototype_header(Register Rheader, Register Rsrc_oop);
  void store_klass(Register klass, Register dst_oop, Register ck = noreg); // Klass will get compressed if ck not provided.
  void store_klass_gap(Register s, Register dst_oop);
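
  // A hedged usage sketch (illustrative only; 'Rrecv' holds an oop, the expected klass is
  // assumed in Z_R2, and the label is a placeholder): load the - possibly compressed - klass
  // pointer and branch on a klass identity check.
  //   __ load_klass(Z_R1, Rrecv);   // decompresses if compressed class pointers are in use
  //   __ compare64_and_branch(Z_R1, Z_R2, Assembler::bcondEqual, L_klass_match);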

  // This function calculates the size of the code generated by
  //   decode_klass_not_null(register dst)
  // when (Universe::heap() != NULL). Hence, if the instructions
  // it generates change, then this method needs to be updated.
  static int instr_size_for_decode_klass_not_null();

  void encode_heap_oop(Register oop);
  void encode_heap_oop_not_null(Register oop);

  static int get_oop_base_pow2_offset(uint64_t oop_base);
  int  get_oop_base(Register Rbase, uint64_t oop_base);
  int  get_oop_base_complement(Register Rbase, uint64_t oop_base);
  void compare_heap_oop(Register Rop1, Address mem, bool maybeNULL);
  void compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL);
  void load_heap_oop(Register dest, const Address &a);
  void load_heap_oop(Register d, int64_t si16, Register s1);
  void load_heap_oop_not_null(Register d, int64_t si16, Register s1);
  void store_heap_oop(Register Roop, RegisterOrConstant offset, Register base);
  void store_heap_oop_not_null(Register Roop, RegisterOrConstant offset, Register base);
  void store_heap_oop_null(Register zero, RegisterOrConstant offset, Register base);
  void oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1, bool only32bitValid = false);
  void oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1);

  void load_mirror(Register mirror, Register method);

  //--------------------------
  //---  Operations on arrays.
  //--------------------------
  unsigned int Clear_Array(Register cnt_arg, Register base_pointer_arg, Register src_addr, Register src_len);
  unsigned int Clear_Array_Const(long cnt, Register base);
  unsigned int Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register src_addr, Register src_len);
  unsigned int CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
                                             Register cnt_reg,
                                             Register tmp1_reg, Register tmp2_reg);

  //-------------------------------------------
  // Special String Intrinsics Implementation.
  //-------------------------------------------
  // Intrinsics for CompactStrings
  // Compress char[] to byte[]. odd_reg contains cnt. tmp3 is only needed for precise behavior
  // in the failure case. Kills dst.
  unsigned int string_compress(Register result, Register src, Register dst, Register odd_reg,
                               Register even_reg, Register tmp, Register tmp2 = noreg);

  // Kills src.
  unsigned int has_negatives(Register result, Register src, Register cnt,
                             Register odd_reg, Register even_reg, Register tmp);

  // Inflate byte[] to char[].
  unsigned int string_inflate_trot(Register src, Register dst, Register cnt, Register tmp);
  // Odd_reg contains cnt. Kills src.
  unsigned int string_inflate(Register src, Register dst, Register odd_reg,
                              Register even_reg, Register tmp);

  unsigned int string_compare(Register str1, Register str2, Register cnt1, Register cnt2,
                              Register odd_reg, Register even_reg, Register result, int ae);

  unsigned int array_equals(bool is_array_equ, Register ary1, Register ary2, Register limit,
                            Register odd_reg, Register even_reg, Register result, bool is_byte);

  unsigned int string_indexof(Register result, Register haystack, Register haycnt,
                              Register needle, Register needlecnt, int needlecntval,
                              Register odd_reg, Register even_reg, int ae);

  unsigned int string_indexof_char(Register result, Register haystack, Register haycnt,
                                   Register needle, jchar needleChar, Register odd_reg, Register even_reg, bool is_byte);

  // Emit an oop const to the constant pool and set a relocation info
  // with address current_pc. Return the TOC offset of the constant.
  int store_const_in_toc(AddressLiteral& val);
  int store_oop_in_toc(AddressLiteral& oop);
  // Emit an oop const to the constant pool via store_oop_in_toc, or
  // emit a scalar const to the constant pool via store_const_in_toc,
  // and load the constant into register dst.
  bool load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg);
  // Get CPU version dependent size of load_const sequence.
  // The returned value is valid only for code sequences
  // generated by load_const, not load_const_optimized.
  static int load_const_from_toc_size() {
    return load_long_pcrelative_size();
  }
  bool load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg);
  static intptr_t get_const_from_toc(address pc);
  static void     set_const_in_toc(address pc, unsigned long new_data, CodeBlob* cb);

  // Dynamic TOC.
  static bool is_load_const(address a);
  static bool is_load_const_from_toc_pcrelative(address a);
  static bool is_load_const_from_toc(address a) { return is_load_const_from_toc_pcrelative(a); }

  // PCrelative TOC access.
  static bool is_call_byregister(address a) { return is_z_basr(*(short*)a); }
  static bool is_load_const_from_toc_call(address a);
  static bool is_load_const_call(address a);
  static int  load_const_call_size()          { return load_const_size() + call_byregister_size(); }
  static int  load_const_from_toc_call_size() { return load_const_from_toc_size() + call_byregister_size(); }
  // Offset is +/- 2**32 -> use long.
  static long get_load_const_from_toc_offset(address a);


  void generate_type_profiling(const Register Rdata,
                               const Register Rreceiver_klass,
                               const Register Rwanted_receiver_klass,
                               const Register Rmatching_row,
                               bool           is_virtual_call);

  // Bit operations for single register operands.
  inline void lshift(Register r, int places, bool doubl = true);   // <<
  inline void rshift(Register r, int places, bool doubl = true);   // >>

  //
  // Debugging
  //

  // Assert on CC (condition code in CPU state).
  void asm_assert(bool check_equal, const char* msg, int id) PRODUCT_RETURN;
  void asm_assert_low(const char* msg, int id) PRODUCT_RETURN;
  void asm_assert_high(const char* msg, int id) PRODUCT_RETURN;
  void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
  void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }

  void asm_assert_static(bool check_equal, const char* msg, int id) PRODUCT_RETURN;
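
  // A hedged usage sketch (illustrative only; the z_cghi emitter name is assumed from the
  // s390 Assembler): the asm_assert_* helpers test the current condition code, so they are
  // typically preceded by a compare.
  //   __ z_cghi(Z_R2, 0);                             // compare Z_R2 (placeholder) against zero
  //   __ asm_assert_eq("value must be zero", 0x1234);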

 private:
  // Emit assertions.
  void asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
                            Register mem_base, const char* msg, int id) PRODUCT_RETURN;

 public:
  inline void asm_assert_mem4_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  true, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  true, 8, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem4_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, true, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, true, 8, mem_offset, mem_base, msg, id);
  }

  inline void asm_assert_mem4_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  false, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  false, 8, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem4_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, false, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, false, 8, mem_offset, mem_base, msg, id);
  }
  void asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) PRODUCT_RETURN;

  // Verify Z_thread contents.
  void verify_thread();

  // Only if +VerifyOops.
  void verify_oop(Register reg, const char* s = "broken oop");

  // TODO: verify_method and klass metadata (compare against vptr?).
  void _verify_method_ptr(Register reg, const char* msg, const char* file, int line) {}
  void _verify_klass_ptr(Register reg, const char* msg, const char* file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

 private:
  // Generate printout in stop().
  static const char* stop_types[];
  enum {
    stop_stop               = 0,
    stop_untested           = 1,
    stop_unimplemented      = 2,
    stop_shouldnotreachhere = 3,
    stop_end                = 4
  };
  // Prints msg and stops execution.
  void    stop(int type, const char* msg, int id = 0);
  address stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation); // Non-relocatable code only!!
  void    stop_static(int type, const char* msg, int id);                                        // Non-relocatable code only!!

 public:

  // Prints msg and stops.
  address stop_chain(       address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, true); }
  address stop_chain_static(address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, false); }
  void stop_static  (const char* msg = "", int id = 0) { stop_static(stop_stop, msg, id); }
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop, msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested, msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); }
  void should_not_reach_here(const char* msg = "", int id = -1) { stop(stop_shouldnotreachhere, msg, id); }

  // Factor out part of stop into subroutine to save space.
  void stop_subroutine();

  // Prints msg but does not stop.
  void warn(const char* msg);

  //-----------------------------
  //--- basic block tracing code
  //-----------------------------
  void trace_basic_block(uint i);
  void init_basic_block_trace();
  // Number of bytes a basic block gets larger due to the tracing code macro (worst case).
  // Currently, the worst case is 48 bytes. 64 puts us securely on the safe side.
  static int basic_blck_trace_blk_size_incr() { return 64; }

  // Write pattern 0x0101010101010101 in region [low-before, high+after].
  // Low and high may be the same registers. Before and after are
  // the numbers of 8-byte words.
  void zap_from_to(Register low, Register high, Register tmp1 = Z_R0, Register tmp2 = Z_R1,
                   int before = 0, int after = 0) PRODUCT_RETURN;

  // Emitters for CRC32 calculation.
 private:
  void fold_byte_crc32(Register crc, Register table, Register val, Register tmp);
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
                             Register data, bool invertCRC);
  void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                          Register t0, Register t1, Register t2, Register t3);
 public:
  void update_byte_crc32(Register crc, Register val, Register table);
  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp);
  void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
                          Register t0, Register t1, Register t2, Register t3);
  void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                          Register t0, Register t1, Register t2, Register t3);
  void kernel_crc32_2word(Register crc, Register buf, Register len, Register table,
                          Register t0, Register t1, Register t2, Register t3);

  // Emitters for BigInteger.multiplyToLen intrinsic.
  // Note: the length of the result array (zlen) is passed on the stack.
 private:
  void add2_with_carry(Register dest_hi, Register dest_lo,
                       Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart,
                             Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_loop(Register x_xstart,
                               Register y, Register z,
                               Register yz_idx, Register idx,
                               Register jdx,
                               Register carry,
                               Register product,
                               Register carry2);
 public:
  void multiply_to_len(Register x, Register xlen,
                       Register y, Register ylen,
                       Register z,
                       Register tmp1, Register tmp2,
                       Register tmp3, Register tmp4, Register tmp5);
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class results in assembly code that jumps around any code
 * emitted between the creation of the instance and its automatic destruction at
 * the end of the enclosing scope, depending on the value of the flag passed to
 * the constructor, which is checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label           _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register _rscratch);
  ~SkipIfEqual();
};

#ifdef ASSERT
// Return false (e.g. important for our impl. of virtual calls).
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

#endif // CPU_S390_VM_MACROASSEMBLER_S390_HPP