1 /* 2 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved. 3 * Copyright (c) 2016, 2017, SAP SE. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #include "precompiled.hpp" 27 #include "c1/c1_Compilation.hpp" 28 #include "c1/c1_LIRAssembler.hpp" 29 #include "c1/c1_MacroAssembler.hpp" 30 #include "c1/c1_Runtime1.hpp" 31 #include "c1/c1_ValueStack.hpp" 32 #include "ci/ciArrayKlass.hpp" 33 #include "ci/ciInstance.hpp" 34 #include "gc/shared/collectedHeap.hpp" 35 #include "gc/shared/barrierSet.hpp" 36 #include "gc/shared/cardTableModRefBS.hpp" 37 #include "nativeInst_s390.hpp" 38 #include "oops/objArrayKlass.hpp" 39 #include "runtime/sharedRuntime.hpp" 40 #include "vmreg_s390.inline.hpp" 41 42 #define __ _masm-> 43 44 #ifndef PRODUCT 45 #undef __ 46 #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm) : _masm)-> 47 #endif 48 49 //------------------------------------------------------------ 50 51 bool LIR_Assembler::is_small_constant(LIR_Opr opr) { 52 // Not used on ZARCH_64 53 ShouldNotCallThis(); 54 return false; 55 } 56 57 LIR_Opr LIR_Assembler::receiverOpr() { 58 return FrameMap::Z_R2_oop_opr; 59 } 60 61 LIR_Opr LIR_Assembler::osrBufferPointer() { 62 return FrameMap::Z_R2_opr; 63 } 64 65 int LIR_Assembler::initial_frame_size_in_bytes() const { 66 return in_bytes(frame_map()->framesize_in_bytes()); 67 } 68 69 // Inline cache check: done before the frame is built. 70 // The inline cached class is in Z_inline_cache(Z_R9). 71 // We fetch the class of the receiver and compare it with the cached class. 72 // If they do not match we jump to the slow case. 73 int LIR_Assembler::check_icache() { 74 Register receiver = receiverOpr()->as_register(); 75 int offset = __ offset(); 76 __ inline_cache_check(receiver, Z_inline_cache); 77 return offset; 78 } 79 80 void LIR_Assembler::osr_entry() { 81 // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp): 82 // 83 // 1. Create a new compiled activation. 84 // 2. Initialize local variables in the compiled activation. The expression stack must be empty 85 // at the osr_bci; it is not initialized. 86 // 3. Jump to the continuation address in compiled code to resume execution. 87 88 // OSR entry point 89 offsets()->set_value(CodeOffsets::OSR_Entry, code_offset()); 90 BlockBegin* osr_entry = compilation()->hir()->osr_entry(); 91 ValueStack* entry_state = osr_entry->end()->state(); 92 int number_of_locks = entry_state->locks_size(); 93 94 // Create a frame for the compiled activation. 95 __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes()); 96 97 // OSR buffer is 98 // 99 // locals[nlocals-1..0] 100 // monitors[number_of_locks-1..0] 101 // 102 // Locals is a direct copy of the interpreter frame so in the osr buffer 103 // the first slot in the local array is the last local from the interpreter 104 // and the last slot is local[0] (receiver) from the interpreter 105 // 106 // Similarly with locks. The first lock slot in the osr buffer is the nth lock 107 // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock 108 // in the interpreter frame (the method lock if a sync method) 109 110 // Initialize monitors in the compiled activation. 111 // I0: pointer to osr buffer 112 // 113 // All other registers are dead at this point and the locals will be 114 // copied into place by code emitted in the IR. 115 116 Register OSR_buf = osrBufferPointer()->as_register(); 117 { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below"); 118 int monitor_offset = BytesPerWord * method()->max_locals() + 119 (2 * BytesPerWord) * (number_of_locks - 1); 120 // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in 121 // the OSR buffer using 2 word entries: first the lock and then 122 // the oop. 123 for (int i = 0; i < number_of_locks; i++) { 124 int slot_offset = monitor_offset - ((i * 2) * BytesPerWord); 125 // Verify the interpreter's monitor has a non-null object. 126 __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is NULL", __LINE__); 127 // Copy the lock field into the compiled activation. 128 __ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf); 129 __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i)); 130 __ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf); 131 __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i)); 132 } 133 } 134 } 135 136 // -------------------------------------------------------------------------------------------- 137 138 address LIR_Assembler::emit_call_c(address a) { 139 __ align_call_far_patchable(__ pc()); 140 address call_addr = __ call_c_opt(a); 141 if (call_addr == NULL) { 142 bailout("const section overflow"); 143 } 144 return call_addr; 145 } 146 147 int LIR_Assembler::emit_exception_handler() { 148 // If the last instruction is a call (typically to do a throw which 149 // is coming at the end after block reordering) the return address 150 // must still point into the code area in order to avoid assertion 151 // failures when searching for the corresponding bci. => Add a nop. 152 // (was bug 5/14/1999 - gri) 153 __ nop(); 154 155 // Generate code for exception handler. 156 address handler_base = __ start_a_stub(exception_handler_size()); 157 if (handler_base == NULL) { 158 // Not enough space left for the handler. 159 bailout("exception handler overflow"); 160 return -1; 161 } 162 163 int offset = code_offset(); 164 165 address a = Runtime1::entry_for (Runtime1::handle_exception_from_callee_id); 166 address call_addr = emit_call_c(a); 167 CHECK_BAILOUT_(-1); 168 __ should_not_reach_here(); 169 guarantee(code_offset() - offset <= exception_handler_size(), "overflow"); 170 __ end_a_stub(); 171 172 return offset; 173 } 174 175 // Emit the code to remove the frame from the stack in the exception 176 // unwind path. 177 int LIR_Assembler::emit_unwind_handler() { 178 #ifndef PRODUCT 179 if (CommentedAssembly) { 180 _masm->block_comment("Unwind handler"); 181 } 182 #endif 183 184 int offset = code_offset(); 185 Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved. 186 Register Rtmp1 = Z_R11; 187 Register Rtmp2 = Z_R12; 188 189 // Fetch the exception from TLS and clear out exception related thread state. 190 Address exc_oop_addr = Address(Z_thread, JavaThread::exception_oop_offset()); 191 Address exc_pc_addr = Address(Z_thread, JavaThread::exception_pc_offset()); 192 __ z_lg(Z_EXC_OOP, exc_oop_addr); 193 __ clear_mem(exc_oop_addr, sizeof(oop)); 194 __ clear_mem(exc_pc_addr, sizeof(intptr_t)); 195 196 __ bind(_unwind_handler_entry); 197 __ verify_not_null_oop(Z_EXC_OOP); 198 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 199 __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP); // Preserve the exception. 200 } 201 202 // Preform needed unlocking. 203 MonitorExitStub* stub = NULL; 204 if (method()->is_synchronized()) { 205 // Runtime1::monitorexit_id expects lock address in Z_R1_scratch. 206 LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch); 207 monitor_address(0, lock); 208 stub = new MonitorExitStub(lock, true, 0); 209 __ unlock_object(Rtmp1, Rtmp2, lock->as_register(), *stub->entry()); 210 __ bind(*stub->continuation()); 211 } 212 213 if (compilation()->env()->dtrace_method_probes()) { 214 ShouldNotReachHere(); // Not supported. 215 #if 0 216 __ mov(rdi, r15_thread); 217 __ mov_metadata(rsi, method()->constant_encoding()); 218 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit))); 219 #endif 220 } 221 222 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) { 223 __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved); // Restore the exception. 224 } 225 226 // Remove the activation and dispatch to the unwind handler. 227 __ pop_frame(); 228 __ z_lg(Z_EXC_PC, _z_abi16(return_pc), Z_SP); 229 230 // Z_EXC_OOP: exception oop 231 // Z_EXC_PC: exception pc 232 233 // Dispatch to the unwind logic. 234 __ load_const_optimized(Z_R5, Runtime1::entry_for (Runtime1::unwind_exception_id)); 235 __ z_br(Z_R5); 236 237 // Emit the slow path assembly. 238 if (stub != NULL) { 239 stub->emit_code(this); 240 } 241 242 return offset; 243 } 244 245 int LIR_Assembler::emit_deopt_handler() { 246 // If the last instruction is a call (typically to do a throw which 247 // is coming at the end after block reordering) the return address 248 // must still point into the code area in order to avoid assertion 249 // failures when searching for the corresponding bci. => Add a nop. 250 // (was bug 5/14/1999 - gri) 251 __ nop(); 252 253 // Generate code for exception handler. 254 address handler_base = __ start_a_stub(deopt_handler_size()); 255 if (handler_base == NULL) { 256 // Not enough space left for the handler. 257 bailout("deopt handler overflow"); 258 return -1; 259 } int offset = code_offset(); 260 // Size must be constant (see HandlerImpl::emit_deopt_handler). 261 __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack()); 262 __ call(Z_R1_scratch); 263 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow"); 264 __ end_a_stub(); 265 266 return offset; 267 } 268 269 void LIR_Assembler::jobject2reg(jobject o, Register reg) { 270 if (o == NULL) { 271 __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove. 272 } else { 273 AddressLiteral a = __ allocate_oop_address(o); 274 bool success = __ load_oop_from_toc(reg, a, reg); 275 if (!success) { 276 bailout("const section overflow"); 277 } 278 } 279 } 280 281 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) { 282 // Allocate a new index in table to hold the object once it's been patched. 283 int oop_index = __ oop_recorder()->allocate_oop_index(NULL); 284 PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index); 285 286 AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index)); 287 assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc"); 288 // The NULL will be dynamically patched later so the sequence to 289 // load the address literal must not be optimized. 290 __ load_const(reg, addrlit); 291 292 patching_epilog(patch, lir_patch_normal, reg, info); 293 } 294 295 void LIR_Assembler::metadata2reg(Metadata* md, Register reg) { 296 bool success = __ set_metadata_constant(md, reg); 297 if (!success) { 298 bailout("const section overflow"); 299 return; 300 } 301 } 302 303 void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) { 304 // Allocate a new index in table to hold the klass once it's been patched. 305 int index = __ oop_recorder()->allocate_metadata_index(NULL); 306 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index); 307 AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index)); 308 assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be an metadata reloc"); 309 // The NULL will be dynamically patched later so the sequence to 310 // load the address literal must not be optimized. 311 __ load_const(reg, addrlit); 312 313 patching_epilog(patch, lir_patch_normal, reg, info); 314 } 315 316 void LIR_Assembler::emit_op3(LIR_Op3* op) { 317 switch (op->code()) { 318 case lir_idiv: 319 case lir_irem: 320 arithmetic_idiv(op->code(), 321 op->in_opr1(), 322 op->in_opr2(), 323 op->in_opr3(), 324 op->result_opr(), 325 op->info()); 326 break; 327 case lir_fmad: { 328 const FloatRegister opr1 = op->in_opr1()->as_double_reg(), 329 opr2 = op->in_opr2()->as_double_reg(), 330 opr3 = op->in_opr3()->as_double_reg(), 331 res = op->result_opr()->as_double_reg(); 332 __ z_madbr(opr3, opr1, opr2); 333 if (res != opr3) { __ z_ldr(res, opr3); } 334 } break; 335 case lir_fmaf: { 336 const FloatRegister opr1 = op->in_opr1()->as_float_reg(), 337 opr2 = op->in_opr2()->as_float_reg(), 338 opr3 = op->in_opr3()->as_float_reg(), 339 res = op->result_opr()->as_float_reg(); 340 __ z_maebr(opr3, opr1, opr2); 341 if (res != opr3) { __ z_ler(res, opr3); } 342 } break; 343 default: ShouldNotReachHere(); break; 344 } 345 } 346 347 348 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) { 349 #ifdef ASSERT 350 assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label"); 351 if (op->block() != NULL) { _branch_target_blocks.append(op->block()); } 352 if (op->ublock() != NULL) { _branch_target_blocks.append(op->ublock()); } 353 #endif 354 355 if (op->cond() == lir_cond_always) { 356 if (op->info() != NULL) { add_debug_info_for_branch(op->info()); } 357 __ branch_optimized(Assembler::bcondAlways, *(op->label())); 358 } else { 359 Assembler::branch_condition acond = Assembler::bcondZero; 360 if (op->code() == lir_cond_float_branch) { 361 assert(op->ublock() != NULL, "must have unordered successor"); 362 __ branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label())); 363 } 364 switch (op->cond()) { 365 case lir_cond_equal: acond = Assembler::bcondEqual; break; 366 case lir_cond_notEqual: acond = Assembler::bcondNotEqual; break; 367 case lir_cond_less: acond = Assembler::bcondLow; break; 368 case lir_cond_lessEqual: acond = Assembler::bcondNotHigh; break; 369 case lir_cond_greaterEqual: acond = Assembler::bcondNotLow; break; 370 case lir_cond_greater: acond = Assembler::bcondHigh; break; 371 case lir_cond_belowEqual: acond = Assembler::bcondNotHigh; break; 372 case lir_cond_aboveEqual: acond = Assembler::bcondNotLow; break; 373 default: ShouldNotReachHere(); 374 } 375 __ branch_optimized(acond,*(op->label())); 376 } 377 } 378 379 380 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) { 381 LIR_Opr src = op->in_opr(); 382 LIR_Opr dest = op->result_opr(); 383 384 switch (op->bytecode()) { 385 case Bytecodes::_i2l: 386 __ move_reg_if_needed(dest->as_register_lo(), T_LONG, src->as_register(), T_INT); 387 break; 388 389 case Bytecodes::_l2i: 390 __ move_reg_if_needed(dest->as_register(), T_INT, src->as_register_lo(), T_LONG); 391 break; 392 393 case Bytecodes::_i2b: 394 __ move_reg_if_needed(dest->as_register(), T_BYTE, src->as_register(), T_INT); 395 break; 396 397 case Bytecodes::_i2c: 398 __ move_reg_if_needed(dest->as_register(), T_CHAR, src->as_register(), T_INT); 399 break; 400 401 case Bytecodes::_i2s: 402 __ move_reg_if_needed(dest->as_register(), T_SHORT, src->as_register(), T_INT); 403 break; 404 405 case Bytecodes::_f2d: 406 assert(dest->is_double_fpu(), "check"); 407 __ move_freg_if_needed(dest->as_double_reg(), T_DOUBLE, src->as_float_reg(), T_FLOAT); 408 break; 409 410 case Bytecodes::_d2f: 411 assert(dest->is_single_fpu(), "check"); 412 __ move_freg_if_needed(dest->as_float_reg(), T_FLOAT, src->as_double_reg(), T_DOUBLE); 413 break; 414 415 case Bytecodes::_i2f: 416 __ z_cefbr(dest->as_float_reg(), src->as_register()); 417 break; 418 419 case Bytecodes::_i2d: 420 __ z_cdfbr(dest->as_double_reg(), src->as_register()); 421 break; 422 423 case Bytecodes::_l2f: 424 __ z_cegbr(dest->as_float_reg(), src->as_register_lo()); 425 break; 426 case Bytecodes::_l2d: 427 __ z_cdgbr(dest->as_double_reg(), src->as_register_lo()); 428 break; 429 430 case Bytecodes::_f2i: 431 case Bytecodes::_f2l: { 432 Label done; 433 FloatRegister Rsrc = src->as_float_reg(); 434 Register Rdst = (op->bytecode() == Bytecodes::_f2i ? dest->as_register() : dest->as_register_lo()); 435 __ clear_reg(Rdst, true, false); 436 __ z_cebr(Rsrc, Rsrc); 437 __ z_brno(done); // NaN -> 0 438 if (op->bytecode() == Bytecodes::_f2i) { 439 __ z_cfebr(Rdst, Rsrc, Assembler::to_zero); 440 } else { // op->bytecode() == Bytecodes::_f2l 441 __ z_cgebr(Rdst, Rsrc, Assembler::to_zero); 442 } 443 __ bind(done); 444 } 445 break; 446 447 case Bytecodes::_d2i: 448 case Bytecodes::_d2l: { 449 Label done; 450 FloatRegister Rsrc = src->as_double_reg(); 451 Register Rdst = (op->bytecode() == Bytecodes::_d2i ? dest->as_register() : dest->as_register_lo()); 452 __ clear_reg(Rdst, true, false); // Don't set CC. 453 __ z_cdbr(Rsrc, Rsrc); 454 __ z_brno(done); // NaN -> 0 455 if (op->bytecode() == Bytecodes::_d2i) { 456 __ z_cfdbr(Rdst, Rsrc, Assembler::to_zero); 457 } else { // Bytecodes::_d2l 458 __ z_cgdbr(Rdst, Rsrc, Assembler::to_zero); 459 } 460 __ bind(done); 461 } 462 break; 463 464 default: ShouldNotReachHere(); 465 } 466 } 467 468 void LIR_Assembler::align_call(LIR_Code code) { 469 // End of call instruction must be 4 byte aligned. 470 int offset = __ offset(); 471 switch (code) { 472 case lir_icvirtual_call: 473 offset += MacroAssembler::load_const_from_toc_size(); 474 // no break 475 case lir_static_call: 476 case lir_optvirtual_call: 477 case lir_dynamic_call: 478 offset += NativeCall::call_far_pcrelative_displacement_offset; 479 break; 480 case lir_virtual_call: // currently, sparc-specific for niagara 481 default: ShouldNotReachHere(); 482 } 483 if ((offset & (NativeCall::call_far_pcrelative_displacement_alignment-1)) != 0) { 484 __ nop(); 485 } 486 } 487 488 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) { 489 assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0, 490 "must be aligned (offset=%d)", __ offset()); 491 assert(rtype == relocInfo::none || 492 rtype == relocInfo::opt_virtual_call_type || 493 rtype == relocInfo::static_call_type, "unexpected rtype"); 494 // Prepend each BRASL with a nop. 495 __ relocate(rtype); 496 __ z_nop(); 497 __ z_brasl(Z_R14, op->addr()); 498 add_call_info(code_offset(), op->info()); 499 } 500 501 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) { 502 address virtual_call_oop_addr = NULL; 503 AddressLiteral empty_ic((address) Universe::non_oop_word()); 504 virtual_call_oop_addr = __ pc(); 505 bool success = __ load_const_from_toc(Z_inline_cache, empty_ic); 506 if (!success) { 507 bailout("const section overflow"); 508 return; 509 } 510 511 // CALL to fixup routine. Fixup routine uses ScopeDesc info 512 // to determine who we intended to call. 513 __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr)); 514 call(op, relocInfo::none); 515 } 516 517 // not supported 518 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) { 519 ShouldNotReachHere(); 520 } 521 522 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) { 523 if (from_reg != to_reg) __ z_lgr(to_reg, from_reg); 524 } 525 526 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 527 assert(src->is_constant(), "should not call otherwise"); 528 assert(dest->is_stack(), "should not call otherwise"); 529 LIR_Const* c = src->as_constant_ptr(); 530 531 unsigned int lmem = 0; 532 unsigned int lcon = 0; 533 int64_t cbits = 0; 534 Address dest_addr; 535 switch (c->type()) { 536 case T_INT: // fall through 537 case T_FLOAT: 538 dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 539 lmem = 4; lcon = 4; cbits = c->as_jint_bits(); 540 break; 541 542 case T_ADDRESS: 543 dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 544 lmem = 8; lcon = 4; cbits = c->as_jint_bits(); 545 break; 546 547 case T_OBJECT: 548 dest_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 549 if (c->as_jobject() == NULL) { 550 __ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8); 551 } else { 552 jobject2reg(c->as_jobject(), Z_R1_scratch); 553 __ reg2mem_opt(Z_R1_scratch, dest_addr, true); 554 } 555 return; 556 557 case T_LONG: // fall through 558 case T_DOUBLE: 559 dest_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 560 lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits()); 561 break; 562 563 default: 564 ShouldNotReachHere(); 565 } 566 567 __ store_const(dest_addr, cbits, lmem, lcon); 568 } 569 570 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 571 assert(src->is_constant(), "should not call otherwise"); 572 assert(dest->is_address(), "should not call otherwise"); 573 // See special case in LIRGenerator::do_StoreIndexed. 574 // T_BYTE: Special case for card mark store. 575 assert(type == T_BYTE || !dest->as_address_ptr()->index()->is_valid(), "not supported"); 576 LIR_Const* c = src->as_constant_ptr(); 577 Address addr = as_Address(dest->as_address_ptr()); 578 579 int store_offset = -1; 580 unsigned int lmem = 0; 581 unsigned int lcon = 0; 582 int64_t cbits = 0; 583 switch (type) { 584 case T_INT: // fall through 585 case T_FLOAT: 586 lmem = 4; lcon = 4; cbits = c->as_jint_bits(); 587 break; 588 589 case T_ADDRESS: 590 lmem = 8; lcon = 4; cbits = c->as_jint_bits(); 591 break; 592 593 case T_OBJECT: // fall through 594 case T_ARRAY: 595 if (c->as_jobject() == NULL) { 596 if (UseCompressedOops && !wide) { 597 store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4); 598 } else { 599 store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8); 600 } 601 } else { 602 jobject2reg(c->as_jobject(), Z_R1_scratch); 603 if (UseCompressedOops && !wide) { 604 __ encode_heap_oop(Z_R1_scratch); 605 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false); 606 } else { 607 store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true); 608 } 609 } 610 assert(store_offset >= 0, "check"); 611 break; 612 613 case T_LONG: // fall through 614 case T_DOUBLE: 615 lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits()); 616 break; 617 618 case T_BOOLEAN: // fall through 619 case T_BYTE: 620 lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint()); 621 break; 622 623 case T_CHAR: // fall through 624 case T_SHORT: 625 lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint()); 626 break; 627 628 default: 629 ShouldNotReachHere(); 630 }; 631 632 // Index register is normally not supported, but for 633 // LIRGenerator::CardTableModRef_post_barrier we make an exception. 634 if (type == T_BYTE && dest->as_address_ptr()->index()->is_valid()) { 635 __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint())); 636 store_offset = __ offset(); 637 if (Immediate::is_uimm12(addr.disp())) { 638 __ z_stc(Z_R0_scratch, addr); 639 } else { 640 __ z_stcy(Z_R0_scratch, addr); 641 } 642 } 643 644 if (store_offset == -1) { 645 store_offset = __ store_const(addr, cbits, lmem, lcon); 646 assert(store_offset >= 0, "check"); 647 } 648 649 if (info != NULL) { 650 add_debug_info_for_null_check(store_offset, info); 651 } 652 } 653 654 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 655 assert(src->is_constant(), "should not call otherwise"); 656 assert(dest->is_register(), "should not call otherwise"); 657 LIR_Const* c = src->as_constant_ptr(); 658 659 switch (c->type()) { 660 case T_INT: { 661 assert(patch_code == lir_patch_none, "no patching handled here"); 662 __ load_const_optimized(dest->as_register(), c->as_jint()); 663 break; 664 } 665 666 case T_ADDRESS: { 667 assert(patch_code == lir_patch_none, "no patching handled here"); 668 __ load_const_optimized(dest->as_register(), c->as_jint()); 669 break; 670 } 671 672 case T_LONG: { 673 assert(patch_code == lir_patch_none, "no patching handled here"); 674 __ load_const_optimized(dest->as_register_lo(), (intptr_t)c->as_jlong()); 675 break; 676 } 677 678 case T_OBJECT: { 679 if (patch_code != lir_patch_none) { 680 jobject2reg_with_patching(dest->as_register(), info); 681 } else { 682 jobject2reg(c->as_jobject(), dest->as_register()); 683 } 684 break; 685 } 686 687 case T_METADATA: { 688 if (patch_code != lir_patch_none) { 689 klass2reg_with_patching(dest->as_register(), info); 690 } else { 691 metadata2reg(c->as_metadata(), dest->as_register()); 692 } 693 break; 694 } 695 696 case T_FLOAT: { 697 Register toc_reg = Z_R1_scratch; 698 __ load_toc(toc_reg); 699 address const_addr = __ float_constant(c->as_jfloat()); 700 if (const_addr == NULL) { 701 bailout("const section overflow"); 702 break; 703 } 704 int displ = const_addr - _masm->code()->consts()->start(); 705 if (dest->is_single_fpu()) { 706 __ z_ley(dest->as_float_reg(), displ, toc_reg); 707 } else { 708 assert(dest->is_single_cpu(), "Must be a cpu register."); 709 __ z_ly(dest->as_register(), displ, toc_reg); 710 } 711 } 712 break; 713 714 case T_DOUBLE: { 715 Register toc_reg = Z_R1_scratch; 716 __ load_toc(toc_reg); 717 address const_addr = __ double_constant(c->as_jdouble()); 718 if (const_addr == NULL) { 719 bailout("const section overflow"); 720 break; 721 } 722 int displ = const_addr - _masm->code()->consts()->start(); 723 if (dest->is_double_fpu()) { 724 __ z_ldy(dest->as_double_reg(), displ, toc_reg); 725 } else { 726 assert(dest->is_double_cpu(), "Must be a long register."); 727 __ z_lg(dest->as_register_lo(), displ, toc_reg); 728 } 729 } 730 break; 731 732 default: 733 ShouldNotReachHere(); 734 } 735 } 736 737 Address LIR_Assembler::as_Address(LIR_Address* addr) { 738 if (addr->base()->is_illegal()) { 739 Unimplemented(); 740 } 741 742 Register base = addr->base()->as_pointer_register(); 743 744 if (addr->index()->is_illegal()) { 745 return Address(base, addr->disp()); 746 } else if (addr->index()->is_cpu_register()) { 747 Register index = addr->index()->as_pointer_register(); 748 return Address(base, index, addr->disp()); 749 } else if (addr->index()->is_constant()) { 750 intptr_t addr_offset = addr->index()->as_constant_ptr()->as_jint() + addr->disp(); 751 return Address(base, addr_offset); 752 } else { 753 ShouldNotReachHere(); 754 return Address(); 755 } 756 } 757 758 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) { 759 switch (type) { 760 case T_INT: 761 case T_FLOAT: { 762 Register tmp = Z_R1_scratch; 763 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 764 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 765 __ mem2reg_opt(tmp, from, false); 766 __ reg2mem_opt(tmp, to, false); 767 break; 768 } 769 case T_ADDRESS: 770 case T_OBJECT: { 771 Register tmp = Z_R1_scratch; 772 Address from = frame_map()->address_for_slot(src->single_stack_ix()); 773 Address to = frame_map()->address_for_slot(dest->single_stack_ix()); 774 __ mem2reg_opt(tmp, from, true); 775 __ reg2mem_opt(tmp, to, true); 776 break; 777 } 778 case T_LONG: 779 case T_DOUBLE: { 780 Register tmp = Z_R1_scratch; 781 Address from = frame_map()->address_for_double_slot(src->double_stack_ix()); 782 Address to = frame_map()->address_for_double_slot(dest->double_stack_ix()); 783 __ mem2reg_opt(tmp, from, true); 784 __ reg2mem_opt(tmp, to, true); 785 break; 786 } 787 788 default: 789 ShouldNotReachHere(); 790 } 791 } 792 793 // 4-byte accesses only! Don't use it to access 8 bytes! 794 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) { 795 ShouldNotCallThis(); 796 return 0; // unused 797 } 798 799 // 4-byte accesses only! Don't use it to access 8 bytes! 800 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) { 801 ShouldNotCallThis(); 802 return 0; // unused 803 } 804 805 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, 806 CodeEmitInfo* info, bool wide, bool unaligned) { 807 808 assert(type != T_METADATA, "load of metadata ptr not supported"); 809 LIR_Address* addr = src_opr->as_address_ptr(); 810 LIR_Opr to_reg = dest; 811 812 Register src = addr->base()->as_pointer_register(); 813 Register disp_reg = Z_R0; 814 int disp_value = addr->disp(); 815 bool needs_patching = (patch_code != lir_patch_none); 816 817 if (addr->base()->type() == T_OBJECT) { 818 __ verify_oop(src); 819 } 820 821 PatchingStub* patch = NULL; 822 if (needs_patching) { 823 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 824 assert(!to_reg->is_double_cpu() || 825 patch_code == lir_patch_none || 826 patch_code == lir_patch_normal, "patching doesn't match register"); 827 } 828 829 if (addr->index()->is_illegal()) { 830 if (!Immediate::is_simm20(disp_value)) { 831 if (needs_patching) { 832 __ load_const(Z_R1_scratch, (intptr_t)0); 833 } else { 834 __ load_const_optimized(Z_R1_scratch, disp_value); 835 } 836 disp_reg = Z_R1_scratch; 837 disp_value = 0; 838 } 839 } else { 840 if (!Immediate::is_simm20(disp_value)) { 841 __ load_const_optimized(Z_R1_scratch, disp_value); 842 __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register()); 843 disp_reg = Z_R1_scratch; 844 disp_value = 0; 845 } 846 disp_reg = addr->index()->as_pointer_register(); 847 } 848 849 // Remember the offset of the load. The patching_epilog must be done 850 // before the call to add_debug_info, otherwise the PcDescs don't get 851 // entered in increasing order. 852 int offset = code_offset(); 853 854 assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up"); 855 856 bool short_disp = Immediate::is_uimm12(disp_value); 857 858 switch (type) { 859 case T_BOOLEAN: // fall through 860 case T_BYTE : __ z_lb(dest->as_register(), disp_value, disp_reg, src); break; 861 case T_CHAR : __ z_llgh(dest->as_register(), disp_value, disp_reg, src); break; 862 case T_SHORT : 863 if (short_disp) { 864 __ z_lh(dest->as_register(), disp_value, disp_reg, src); 865 } else { 866 __ z_lhy(dest->as_register(), disp_value, disp_reg, src); 867 } 868 break; 869 case T_INT : 870 if (short_disp) { 871 __ z_l(dest->as_register(), disp_value, disp_reg, src); 872 } else { 873 __ z_ly(dest->as_register(), disp_value, disp_reg, src); 874 } 875 break; 876 case T_ADDRESS: 877 if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) { 878 __ z_llgf(dest->as_register(), disp_value, disp_reg, src); 879 __ decode_klass_not_null(dest->as_register()); 880 } else { 881 __ z_lg(dest->as_register(), disp_value, disp_reg, src); 882 } 883 break; 884 case T_ARRAY : // fall through 885 case T_OBJECT: 886 { 887 if (UseCompressedOops && !wide) { 888 __ z_llgf(dest->as_register(), disp_value, disp_reg, src); 889 __ oop_decoder(dest->as_register(), dest->as_register(), true); 890 } else { 891 __ z_lg(dest->as_register(), disp_value, disp_reg, src); 892 } 893 break; 894 } 895 case T_FLOAT: 896 if (short_disp) { 897 __ z_le(dest->as_float_reg(), disp_value, disp_reg, src); 898 } else { 899 __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src); 900 } 901 break; 902 case T_DOUBLE: 903 if (short_disp) { 904 __ z_ld(dest->as_double_reg(), disp_value, disp_reg, src); 905 } else { 906 __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src); 907 } 908 break; 909 case T_LONG : __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break; 910 default : ShouldNotReachHere(); 911 } 912 if (type == T_ARRAY || type == T_OBJECT) { 913 __ verify_oop(dest->as_register()); 914 } 915 916 if (patch != NULL) { 917 patching_epilog(patch, patch_code, src, info); 918 } 919 if (info != NULL) add_debug_info_for_null_check(offset, info); 920 } 921 922 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) { 923 assert(src->is_stack(), "should not call otherwise"); 924 assert(dest->is_register(), "should not call otherwise"); 925 926 if (dest->is_single_cpu()) { 927 if (type == T_ARRAY || type == T_OBJECT) { 928 __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true); 929 __ verify_oop(dest->as_register()); 930 } else if (type == T_METADATA) { 931 __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true); 932 } else { 933 __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false); 934 } 935 } else if (dest->is_double_cpu()) { 936 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix()); 937 __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true); 938 } else if (dest->is_single_fpu()) { 939 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix()); 940 __ mem2freg_opt(dest->as_float_reg(), src_addr, false); 941 } else if (dest->is_double_fpu()) { 942 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix()); 943 __ mem2freg_opt(dest->as_double_reg(), src_addr, true); 944 } else { 945 ShouldNotReachHere(); 946 } 947 } 948 949 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) { 950 assert(src->is_register(), "should not call otherwise"); 951 assert(dest->is_stack(), "should not call otherwise"); 952 953 if (src->is_single_cpu()) { 954 const Address dst = frame_map()->address_for_slot(dest->single_stack_ix()); 955 if (type == T_OBJECT || type == T_ARRAY) { 956 __ verify_oop(src->as_register()); 957 __ reg2mem_opt(src->as_register(), dst, true); 958 } else if (type == T_METADATA) { 959 __ reg2mem_opt(src->as_register(), dst, true); 960 } else { 961 __ reg2mem_opt(src->as_register(), dst, false); 962 } 963 } else if (src->is_double_cpu()) { 964 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix()); 965 __ reg2mem_opt(src->as_register_lo(), dstLO, true); 966 } else if (src->is_single_fpu()) { 967 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix()); 968 __ freg2mem_opt(src->as_float_reg(), dst_addr, false); 969 } else if (src->is_double_fpu()) { 970 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix()); 971 __ freg2mem_opt(src->as_double_reg(), dst_addr, true); 972 } else { 973 ShouldNotReachHere(); 974 } 975 } 976 977 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) { 978 if (from_reg->is_float_kind() && to_reg->is_float_kind()) { 979 if (from_reg->is_double_fpu()) { 980 // double to double moves 981 assert(to_reg->is_double_fpu(), "should match"); 982 __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg()); 983 } else { 984 // float to float moves 985 assert(to_reg->is_single_fpu(), "should match"); 986 __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg()); 987 } 988 } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) { 989 if (from_reg->is_double_cpu()) { 990 __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register()); 991 } else if (to_reg->is_double_cpu()) { 992 // int to int moves 993 __ z_lgr(to_reg->as_register_lo(), from_reg->as_register()); 994 } else { 995 // int to int moves 996 __ z_lgr(to_reg->as_register(), from_reg->as_register()); 997 } 998 } else { 999 ShouldNotReachHere(); 1000 } 1001 if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) { 1002 __ verify_oop(to_reg->as_register()); 1003 } 1004 } 1005 1006 void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type, 1007 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, 1008 bool wide, bool unaligned) { 1009 assert(type != T_METADATA, "store of metadata ptr not supported"); 1010 LIR_Address* addr = dest_opr->as_address_ptr(); 1011 1012 Register dest = addr->base()->as_pointer_register(); 1013 Register disp_reg = Z_R0; 1014 int disp_value = addr->disp(); 1015 bool needs_patching = (patch_code != lir_patch_none); 1016 1017 if (addr->base()->is_oop_register()) { 1018 __ verify_oop(dest); 1019 } 1020 1021 PatchingStub* patch = NULL; 1022 if (needs_patching) { 1023 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 1024 assert(!from->is_double_cpu() || 1025 patch_code == lir_patch_none || 1026 patch_code == lir_patch_normal, "patching doesn't match register"); 1027 } 1028 1029 assert(!needs_patching || (!Immediate::is_simm20(disp_value) && addr->index()->is_illegal()), "assumption"); 1030 if (addr->index()->is_illegal()) { 1031 if (!Immediate::is_simm20(disp_value)) { 1032 if (needs_patching) { 1033 __ load_const(Z_R1_scratch, (intptr_t)0); 1034 } else { 1035 __ load_const_optimized(Z_R1_scratch, disp_value); 1036 } 1037 disp_reg = Z_R1_scratch; 1038 disp_value = 0; 1039 } 1040 } else { 1041 if (!Immediate::is_simm20(disp_value)) { 1042 __ load_const_optimized(Z_R1_scratch, disp_value); 1043 __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register()); 1044 disp_reg = Z_R1_scratch; 1045 disp_value = 0; 1046 } 1047 disp_reg = addr->index()->as_pointer_register(); 1048 } 1049 1050 assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up"); 1051 1052 if (type == T_ARRAY || type == T_OBJECT) { 1053 __ verify_oop(from->as_register()); 1054 } 1055 1056 bool short_disp = Immediate::is_uimm12(disp_value); 1057 1058 // Remember the offset of the store. The patching_epilog must be done 1059 // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get 1060 // entered in increasing order. 1061 int offset = code_offset(); 1062 switch (type) { 1063 case T_BOOLEAN: // fall through 1064 case T_BYTE : 1065 if (short_disp) { 1066 __ z_stc(from->as_register(), disp_value, disp_reg, dest); 1067 } else { 1068 __ z_stcy(from->as_register(), disp_value, disp_reg, dest); 1069 } 1070 break; 1071 case T_CHAR : // fall through 1072 case T_SHORT : 1073 if (short_disp) { 1074 __ z_sth(from->as_register(), disp_value, disp_reg, dest); 1075 } else { 1076 __ z_sthy(from->as_register(), disp_value, disp_reg, dest); 1077 } 1078 break; 1079 case T_INT : 1080 if (short_disp) { 1081 __ z_st(from->as_register(), disp_value, disp_reg, dest); 1082 } else { 1083 __ z_sty(from->as_register(), disp_value, disp_reg, dest); 1084 } 1085 break; 1086 case T_LONG : __ z_stg(from->as_register_lo(), disp_value, disp_reg, dest); break; 1087 case T_ADDRESS: __ z_stg(from->as_register(), disp_value, disp_reg, dest); break; 1088 break; 1089 case T_ARRAY : // fall through 1090 case T_OBJECT: 1091 { 1092 if (UseCompressedOops && !wide) { 1093 Register compressed_src = Z_R14; 1094 __ oop_encoder(compressed_src, from->as_register(), true, (disp_reg != Z_R1) ? Z_R1 : Z_R0, -1, true); 1095 offset = code_offset(); 1096 if (short_disp) { 1097 __ z_st(compressed_src, disp_value, disp_reg, dest); 1098 } else { 1099 __ z_sty(compressed_src, disp_value, disp_reg, dest); 1100 } 1101 } else { 1102 __ z_stg(from->as_register(), disp_value, disp_reg, dest); 1103 } 1104 break; 1105 } 1106 case T_FLOAT : 1107 if (short_disp) { 1108 __ z_ste(from->as_float_reg(), disp_value, disp_reg, dest); 1109 } else { 1110 __ z_stey(from->as_float_reg(), disp_value, disp_reg, dest); 1111 } 1112 break; 1113 case T_DOUBLE: 1114 if (short_disp) { 1115 __ z_std(from->as_double_reg(), disp_value, disp_reg, dest); 1116 } else { 1117 __ z_stdy(from->as_double_reg(), disp_value, disp_reg, dest); 1118 } 1119 break; 1120 default: ShouldNotReachHere(); 1121 } 1122 1123 if (patch != NULL) { 1124 patching_epilog(patch, patch_code, dest, info); 1125 } 1126 1127 if (info != NULL) add_debug_info_for_null_check(offset, info); 1128 } 1129 1130 1131 void LIR_Assembler::return_op(LIR_Opr result) { 1132 assert(result->is_illegal() || 1133 (result->is_single_cpu() && result->as_register() == Z_R2) || 1134 (result->is_double_cpu() && result->as_register_lo() == Z_R2) || 1135 (result->is_single_fpu() && result->as_float_reg() == Z_F0) || 1136 (result->is_double_fpu() && result->as_double_reg() == Z_F0), "convention"); 1137 1138 AddressLiteral pp(os::get_polling_page()); 1139 __ load_const_optimized(Z_R1_scratch, pp); 1140 1141 // Pop the frame before the safepoint code. 1142 int retPC_offset = initial_frame_size_in_bytes() + _z_abi16(return_pc); 1143 if (Displacement::is_validDisp(retPC_offset)) { 1144 __ z_lg(Z_R14, retPC_offset, Z_SP); 1145 __ add2reg(Z_SP, initial_frame_size_in_bytes()); 1146 } else { 1147 __ add2reg(Z_SP, initial_frame_size_in_bytes()); 1148 __ restore_return_pc(); 1149 } 1150 1151 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { 1152 __ reserved_stack_check(Z_R14); 1153 } 1154 1155 // We need to mark the code position where the load from the safepoint 1156 // polling page was emitted as relocInfo::poll_return_type here. 1157 __ relocate(relocInfo::poll_return_type); 1158 __ load_from_polling_page(Z_R1_scratch); 1159 1160 __ z_br(Z_R14); // Return to caller. 1161 } 1162 1163 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) { 1164 AddressLiteral pp(os::get_polling_page()); 1165 __ load_const_optimized(tmp->as_register_lo(), pp); 1166 guarantee(info != NULL, "Shouldn't be NULL"); 1167 add_debug_info_for_branch(info); 1168 int offset = __ offset(); 1169 __ relocate(relocInfo::poll_type); 1170 __ load_from_polling_page(tmp->as_register_lo()); 1171 return offset; 1172 } 1173 1174 void LIR_Assembler::emit_static_call_stub() { 1175 1176 // Stub is fixed up when the corresponding call is converted from calling 1177 // compiled code to calling interpreted code. 1178 1179 address call_pc = __ pc(); 1180 address stub = __ start_a_stub(call_stub_size()); 1181 if (stub == NULL) { 1182 bailout("static call stub overflow"); 1183 return; 1184 } 1185 1186 int start = __ offset(); 1187 1188 __ relocate(static_stub_Relocation::spec(call_pc)); 1189 1190 // See also Matcher::interpreter_method_oop_reg(). 1191 AddressLiteral meta = __ allocate_metadata_address(NULL); 1192 bool success = __ load_const_from_toc(Z_method, meta); 1193 1194 __ set_inst_mark(); 1195 AddressLiteral a((address)-1); 1196 success = success && __ load_const_from_toc(Z_R1, a); 1197 if (!success) { 1198 bailout("const section overflow"); 1199 return; 1200 } 1201 1202 __ z_br(Z_R1); 1203 assert(__ offset() - start <= call_stub_size(), "stub too big"); 1204 __ end_a_stub(); // Update current stubs pointer and restore insts_end. 1205 } 1206 1207 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) { 1208 bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual; 1209 if (opr1->is_single_cpu()) { 1210 Register reg1 = opr1->as_register(); 1211 if (opr2->is_single_cpu()) { 1212 // cpu register - cpu register 1213 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { 1214 __ z_clgr(reg1, opr2->as_register()); 1215 } else { 1216 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?"); 1217 if (unsigned_comp) { 1218 __ z_clr(reg1, opr2->as_register()); 1219 } else { 1220 __ z_cr(reg1, opr2->as_register()); 1221 } 1222 } 1223 } else if (opr2->is_stack()) { 1224 // cpu register - stack 1225 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) { 1226 __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 1227 } else { 1228 if (unsigned_comp) { 1229 __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 1230 } else { 1231 __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix())); 1232 } 1233 } 1234 } else if (opr2->is_constant()) { 1235 // cpu register - constant 1236 LIR_Const* c = opr2->as_constant_ptr(); 1237 if (c->type() == T_INT) { 1238 if (unsigned_comp) { 1239 __ z_clfi(reg1, c->as_jint()); 1240 } else { 1241 __ z_cfi(reg1, c->as_jint()); 1242 } 1243 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) { 1244 // In 64bit oops are single register. 1245 jobject o = c->as_jobject(); 1246 if (o == NULL) { 1247 __ z_ltgr(reg1, reg1); 1248 } else { 1249 jobject2reg(o, Z_R1_scratch); 1250 __ z_cgr(reg1, Z_R1_scratch); 1251 } 1252 } else { 1253 fatal("unexpected type: %s", basictype_to_str(c->type())); 1254 } 1255 // cpu register - address 1256 } else if (opr2->is_address()) { 1257 if (op->info() != NULL) { 1258 add_debug_info_for_null_check_here(op->info()); 1259 } 1260 if (unsigned_comp) { 1261 __ z_cly(reg1, as_Address(opr2->as_address_ptr())); 1262 } else { 1263 __ z_cy(reg1, as_Address(opr2->as_address_ptr())); 1264 } 1265 } else { 1266 ShouldNotReachHere(); 1267 } 1268 1269 } else if (opr1->is_double_cpu()) { 1270 assert(!unsigned_comp, "unexpected"); 1271 Register xlo = opr1->as_register_lo(); 1272 Register xhi = opr1->as_register_hi(); 1273 if (opr2->is_double_cpu()) { 1274 __ z_cgr(xlo, opr2->as_register_lo()); 1275 } else if (opr2->is_constant()) { 1276 // cpu register - constant 0 1277 assert(opr2->as_jlong() == (jlong)0, "only handles zero"); 1278 __ z_ltgr(xlo, xlo); 1279 } else { 1280 ShouldNotReachHere(); 1281 } 1282 1283 } else if (opr1->is_single_fpu()) { 1284 if (opr2->is_single_fpu()) { 1285 __ z_cebr(opr1->as_float_reg(), opr2->as_float_reg()); 1286 } else { 1287 // stack slot 1288 Address addr = frame_map()->address_for_slot(opr2->single_stack_ix()); 1289 if (Immediate::is_uimm12(addr.disp())) { 1290 __ z_ceb(opr1->as_float_reg(), addr); 1291 } else { 1292 __ z_ley(Z_fscratch_1, addr); 1293 __ z_cebr(opr1->as_float_reg(), Z_fscratch_1); 1294 } 1295 } 1296 } else if (opr1->is_double_fpu()) { 1297 if (opr2->is_double_fpu()) { 1298 __ z_cdbr(opr1->as_double_reg(), opr2->as_double_reg()); 1299 } else { 1300 // stack slot 1301 Address addr = frame_map()->address_for_slot(opr2->double_stack_ix()); 1302 if (Immediate::is_uimm12(addr.disp())) { 1303 __ z_cdb(opr1->as_double_reg(), addr); 1304 } else { 1305 __ z_ldy(Z_fscratch_1, addr); 1306 __ z_cdbr(opr1->as_double_reg(), Z_fscratch_1); 1307 } 1308 } 1309 } else { 1310 ShouldNotReachHere(); 1311 } 1312 } 1313 1314 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) { 1315 Label done; 1316 Register dreg = dst->as_register(); 1317 1318 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) { 1319 assert((left->is_single_fpu() && right->is_single_fpu()) || 1320 (left->is_double_fpu() && right->is_double_fpu()), "unexpected operand types"); 1321 bool is_single = left->is_single_fpu(); 1322 bool is_unordered_less = (code == lir_ucmp_fd2i); 1323 FloatRegister lreg = is_single ? left->as_float_reg() : left->as_double_reg(); 1324 FloatRegister rreg = is_single ? right->as_float_reg() : right->as_double_reg(); 1325 if (is_single) { 1326 __ z_cebr(lreg, rreg); 1327 } else { 1328 __ z_cdbr(lreg, rreg); 1329 } 1330 if (VM_Version::has_LoadStoreConditional()) { 1331 Register one = Z_R0_scratch; 1332 Register minus_one = Z_R1_scratch; 1333 __ z_lghi(minus_one, -1); 1334 __ z_lghi(one, 1); 1335 __ z_lghi(dreg, 0); 1336 __ z_locgr(dreg, one, is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered); 1337 __ z_locgr(dreg, minus_one, is_unordered_less ? Assembler::bcondLowOrNotOrdered : Assembler::bcondLow); 1338 } else { 1339 __ clear_reg(dreg, true, false); 1340 __ z_bre(done); // if (left == right) dst = 0 1341 1342 // if (left > right || ((code ~= cmpg) && (left <> right)) dst := 1 1343 __ z_lhi(dreg, 1); 1344 __ z_brc(is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered, done); 1345 1346 // if (left < right || ((code ~= cmpl) && (left <> right)) dst := -1 1347 __ z_lhi(dreg, -1); 1348 } 1349 } else { 1350 assert(code == lir_cmp_l2i, "check"); 1351 if (VM_Version::has_LoadStoreConditional()) { 1352 Register one = Z_R0_scratch; 1353 Register minus_one = Z_R1_scratch; 1354 __ z_cgr(left->as_register_lo(), right->as_register_lo()); 1355 __ z_lghi(minus_one, -1); 1356 __ z_lghi(one, 1); 1357 __ z_lghi(dreg, 0); 1358 __ z_locgr(dreg, one, Assembler::bcondHigh); 1359 __ z_locgr(dreg, minus_one, Assembler::bcondLow); 1360 } else { 1361 __ z_cgr(left->as_register_lo(), right->as_register_lo()); 1362 __ z_lghi(dreg, 0); // eq value 1363 __ z_bre(done); 1364 __ z_lghi(dreg, 1); // gt value 1365 __ z_brh(done); 1366 __ z_lghi(dreg, -1); // lt value 1367 } 1368 } 1369 __ bind(done); 1370 } 1371 1372 // result = condition ? opr1 : opr2 1373 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) { 1374 Assembler::branch_condition acond = Assembler::bcondEqual, ncond = Assembler::bcondNotEqual; 1375 switch (condition) { 1376 case lir_cond_equal: acond = Assembler::bcondEqual; ncond = Assembler::bcondNotEqual; break; 1377 case lir_cond_notEqual: acond = Assembler::bcondNotEqual; ncond = Assembler::bcondEqual; break; 1378 case lir_cond_less: acond = Assembler::bcondLow; ncond = Assembler::bcondNotLow; break; 1379 case lir_cond_lessEqual: acond = Assembler::bcondNotHigh; ncond = Assembler::bcondHigh; break; 1380 case lir_cond_greaterEqual: acond = Assembler::bcondNotLow; ncond = Assembler::bcondLow; break; 1381 case lir_cond_greater: acond = Assembler::bcondHigh; ncond = Assembler::bcondNotHigh; break; 1382 case lir_cond_belowEqual: acond = Assembler::bcondNotHigh; ncond = Assembler::bcondHigh; break; 1383 case lir_cond_aboveEqual: acond = Assembler::bcondNotLow; ncond = Assembler::bcondLow; break; 1384 default: ShouldNotReachHere(); 1385 } 1386 1387 if (opr1->is_cpu_register()) { 1388 reg2reg(opr1, result); 1389 } else if (opr1->is_stack()) { 1390 stack2reg(opr1, result, result->type()); 1391 } else if (opr1->is_constant()) { 1392 const2reg(opr1, result, lir_patch_none, NULL); 1393 } else { 1394 ShouldNotReachHere(); 1395 } 1396 1397 if (VM_Version::has_LoadStoreConditional() && !opr2->is_constant()) { 1398 // Optimized version that does not require a branch. 1399 if (opr2->is_single_cpu()) { 1400 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); 1401 __ z_locgr(result->as_register(), opr2->as_register(), ncond); 1402 } else if (opr2->is_double_cpu()) { 1403 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1404 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1405 __ z_locgr(result->as_register_lo(), opr2->as_register_lo(), ncond); 1406 } else if (opr2->is_single_stack()) { 1407 __ z_loc(result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()), ncond); 1408 } else if (opr2->is_double_stack()) { 1409 __ z_locg(result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix()), ncond); 1410 } else { 1411 ShouldNotReachHere(); 1412 } 1413 } else { 1414 Label skip; 1415 __ z_brc(acond, skip); 1416 if (opr2->is_cpu_register()) { 1417 reg2reg(opr2, result); 1418 } else if (opr2->is_stack()) { 1419 stack2reg(opr2, result, result->type()); 1420 } else if (opr2->is_constant()) { 1421 const2reg(opr2, result, lir_patch_none, NULL); 1422 } else { 1423 ShouldNotReachHere(); 1424 } 1425 __ bind(skip); 1426 } 1427 } 1428 1429 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, 1430 CodeEmitInfo* info, bool pop_fpu_stack) { 1431 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 1432 1433 if (left->is_single_cpu()) { 1434 assert(left == dest, "left and dest must be equal"); 1435 Register lreg = left->as_register(); 1436 1437 if (right->is_single_cpu()) { 1438 // cpu register - cpu register 1439 Register rreg = right->as_register(); 1440 switch (code) { 1441 case lir_add: __ z_ar (lreg, rreg); break; 1442 case lir_sub: __ z_sr (lreg, rreg); break; 1443 case lir_mul: __ z_msr(lreg, rreg); break; 1444 default: ShouldNotReachHere(); 1445 } 1446 1447 } else if (right->is_stack()) { 1448 // cpu register - stack 1449 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1450 switch (code) { 1451 case lir_add: __ z_ay(lreg, raddr); break; 1452 case lir_sub: __ z_sy(lreg, raddr); break; 1453 default: ShouldNotReachHere(); 1454 } 1455 1456 } else if (right->is_constant()) { 1457 // cpu register - constant 1458 jint c = right->as_constant_ptr()->as_jint(); 1459 switch (code) { 1460 case lir_add: __ z_agfi(lreg, c); break; 1461 case lir_sub: __ z_agfi(lreg, -c); break; // note: -min_jint == min_jint 1462 case lir_mul: __ z_msfi(lreg, c); break; 1463 default: ShouldNotReachHere(); 1464 } 1465 1466 } else { 1467 ShouldNotReachHere(); 1468 } 1469 1470 } else if (left->is_double_cpu()) { 1471 assert(left == dest, "left and dest must be equal"); 1472 Register lreg_lo = left->as_register_lo(); 1473 Register lreg_hi = left->as_register_hi(); 1474 1475 if (right->is_double_cpu()) { 1476 // cpu register - cpu register 1477 Register rreg_lo = right->as_register_lo(); 1478 Register rreg_hi = right->as_register_hi(); 1479 assert_different_registers(lreg_lo, rreg_lo); 1480 switch (code) { 1481 case lir_add: 1482 __ z_agr(lreg_lo, rreg_lo); 1483 break; 1484 case lir_sub: 1485 __ z_sgr(lreg_lo, rreg_lo); 1486 break; 1487 case lir_mul: 1488 __ z_msgr(lreg_lo, rreg_lo); 1489 break; 1490 default: 1491 ShouldNotReachHere(); 1492 } 1493 1494 } else if (right->is_constant()) { 1495 // cpu register - constant 1496 jlong c = right->as_constant_ptr()->as_jlong_bits(); 1497 switch (code) { 1498 case lir_add: __ z_agfi(lreg_lo, c); break; 1499 case lir_sub: 1500 if (c != min_jint) { 1501 __ z_agfi(lreg_lo, -c); 1502 } else { 1503 // -min_jint cannot be represented as simm32 in z_agfi 1504 // min_jint sign extended: 0xffffffff80000000 1505 // -min_jint as 64 bit integer: 0x0000000080000000 1506 // 0x80000000 can be represented as uimm32 in z_algfi 1507 // lreg_lo := lreg_lo + -min_jint == lreg_lo + 0x80000000 1508 __ z_algfi(lreg_lo, UCONST64(0x80000000)); 1509 } 1510 break; 1511 case lir_mul: __ z_msgfi(lreg_lo, c); break; 1512 default: 1513 ShouldNotReachHere(); 1514 } 1515 1516 } else { 1517 ShouldNotReachHere(); 1518 } 1519 1520 } else if (left->is_single_fpu()) { 1521 assert(left == dest, "left and dest must be equal"); 1522 FloatRegister lreg = left->as_float_reg(); 1523 FloatRegister rreg = right->is_single_fpu() ? right->as_float_reg() : fnoreg; 1524 Address raddr; 1525 1526 if (rreg == fnoreg) { 1527 assert(right->is_single_stack(), "constants should be loaded into register"); 1528 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1529 if (!Immediate::is_uimm12(raddr.disp())) { 1530 __ mem2freg_opt(rreg = Z_fscratch_1, raddr, false); 1531 } 1532 } 1533 1534 if (rreg != fnoreg) { 1535 switch (code) { 1536 case lir_add: __ z_aebr(lreg, rreg); break; 1537 case lir_sub: __ z_sebr(lreg, rreg); break; 1538 case lir_mul_strictfp: // fall through 1539 case lir_mul: __ z_meebr(lreg, rreg); break; 1540 case lir_div_strictfp: // fall through 1541 case lir_div: __ z_debr(lreg, rreg); break; 1542 default: ShouldNotReachHere(); 1543 } 1544 } else { 1545 switch (code) { 1546 case lir_add: __ z_aeb(lreg, raddr); break; 1547 case lir_sub: __ z_seb(lreg, raddr); break; 1548 case lir_mul_strictfp: // fall through 1549 case lir_mul: __ z_meeb(lreg, raddr); break; 1550 case lir_div_strictfp: // fall through 1551 case lir_div: __ z_deb(lreg, raddr); break; 1552 default: ShouldNotReachHere(); 1553 } 1554 } 1555 } else if (left->is_double_fpu()) { 1556 assert(left == dest, "left and dest must be equal"); 1557 FloatRegister lreg = left->as_double_reg(); 1558 FloatRegister rreg = right->is_double_fpu() ? right->as_double_reg() : fnoreg; 1559 Address raddr; 1560 1561 if (rreg == fnoreg) { 1562 assert(right->is_double_stack(), "constants should be loaded into register"); 1563 raddr = frame_map()->address_for_slot(right->double_stack_ix()); 1564 if (!Immediate::is_uimm12(raddr.disp())) { 1565 __ mem2freg_opt(rreg = Z_fscratch_1, raddr, true); 1566 } 1567 } 1568 1569 if (rreg != fnoreg) { 1570 switch (code) { 1571 case lir_add: __ z_adbr(lreg, rreg); break; 1572 case lir_sub: __ z_sdbr(lreg, rreg); break; 1573 case lir_mul_strictfp: // fall through 1574 case lir_mul: __ z_mdbr(lreg, rreg); break; 1575 case lir_div_strictfp: // fall through 1576 case lir_div: __ z_ddbr(lreg, rreg); break; 1577 default: ShouldNotReachHere(); 1578 } 1579 } else { 1580 switch (code) { 1581 case lir_add: __ z_adb(lreg, raddr); break; 1582 case lir_sub: __ z_sdb(lreg, raddr); break; 1583 case lir_mul_strictfp: // fall through 1584 case lir_mul: __ z_mdb(lreg, raddr); break; 1585 case lir_div_strictfp: // fall through 1586 case lir_div: __ z_ddb(lreg, raddr); break; 1587 default: ShouldNotReachHere(); 1588 } 1589 } 1590 } else if (left->is_address()) { 1591 assert(left == dest, "left and dest must be equal"); 1592 assert(code == lir_add, "unsupported operation"); 1593 assert(right->is_constant(), "unsupported operand"); 1594 jint c = right->as_constant_ptr()->as_jint(); 1595 LIR_Address* lir_addr = left->as_address_ptr(); 1596 Address addr = as_Address(lir_addr); 1597 switch (lir_addr->type()) { 1598 case T_INT: 1599 __ add2mem_32(addr, c, Z_R1_scratch); 1600 break; 1601 case T_LONG: 1602 __ add2mem_64(addr, c, Z_R1_scratch); 1603 break; 1604 default: 1605 ShouldNotReachHere(); 1606 } 1607 } else { 1608 ShouldNotReachHere(); 1609 } 1610 } 1611 1612 void LIR_Assembler::fpop() { 1613 // do nothing 1614 } 1615 1616 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1617 switch (code) { 1618 case lir_sqrt: { 1619 assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt"); 1620 FloatRegister src_reg = value->as_double_reg(); 1621 FloatRegister dst_reg = dest->as_double_reg(); 1622 __ z_sqdbr(dst_reg, src_reg); 1623 break; 1624 } 1625 case lir_abs: { 1626 assert(!thread->is_valid(), "there is no need for a thread_reg for fabs"); 1627 FloatRegister src_reg = value->as_double_reg(); 1628 FloatRegister dst_reg = dest->as_double_reg(); 1629 __ z_lpdbr(dst_reg, src_reg); 1630 break; 1631 } 1632 default: { 1633 ShouldNotReachHere(); 1634 break; 1635 } 1636 } 1637 } 1638 1639 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) { 1640 if (left->is_single_cpu()) { 1641 Register reg = left->as_register(); 1642 if (right->is_constant()) { 1643 int val = right->as_constant_ptr()->as_jint(); 1644 switch (code) { 1645 case lir_logic_and: __ z_nilf(reg, val); break; 1646 case lir_logic_or: __ z_oilf(reg, val); break; 1647 case lir_logic_xor: __ z_xilf(reg, val); break; 1648 default: ShouldNotReachHere(); 1649 } 1650 } else if (right->is_stack()) { 1651 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1652 switch (code) { 1653 case lir_logic_and: __ z_ny(reg, raddr); break; 1654 case lir_logic_or: __ z_oy(reg, raddr); break; 1655 case lir_logic_xor: __ z_xy(reg, raddr); break; 1656 default: ShouldNotReachHere(); 1657 } 1658 } else { 1659 Register rright = right->as_register(); 1660 switch (code) { 1661 case lir_logic_and: __ z_nr(reg, rright); break; 1662 case lir_logic_or : __ z_or(reg, rright); break; 1663 case lir_logic_xor: __ z_xr(reg, rright); break; 1664 default: ShouldNotReachHere(); 1665 } 1666 } 1667 move_regs(reg, dst->as_register()); 1668 } else { 1669 Register l_lo = left->as_register_lo(); 1670 if (right->is_constant()) { 1671 __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong()); 1672 switch (code) { 1673 case lir_logic_and: 1674 __ z_ngr(l_lo, Z_R1_scratch); 1675 break; 1676 case lir_logic_or: 1677 __ z_ogr(l_lo, Z_R1_scratch); 1678 break; 1679 case lir_logic_xor: 1680 __ z_xgr(l_lo, Z_R1_scratch); 1681 break; 1682 default: ShouldNotReachHere(); 1683 } 1684 } else { 1685 Register r_lo; 1686 if (right->type() == T_OBJECT || right->type() == T_ARRAY) { 1687 r_lo = right->as_register(); 1688 } else { 1689 r_lo = right->as_register_lo(); 1690 } 1691 switch (code) { 1692 case lir_logic_and: 1693 __ z_ngr(l_lo, r_lo); 1694 break; 1695 case lir_logic_or: 1696 __ z_ogr(l_lo, r_lo); 1697 break; 1698 case lir_logic_xor: 1699 __ z_xgr(l_lo, r_lo); 1700 break; 1701 default: ShouldNotReachHere(); 1702 } 1703 } 1704 1705 Register dst_lo = dst->as_register_lo(); 1706 1707 move_regs(l_lo, dst_lo); 1708 } 1709 } 1710 1711 // See operand selection in LIRGenerator::do_ArithmeticOp_Int(). 1712 void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) { 1713 if (left->is_double_cpu()) { 1714 // 64 bit integer case 1715 assert(left->is_double_cpu(), "left must be register"); 1716 assert(right->is_double_cpu() || is_power_of_2_long(right->as_jlong()), 1717 "right must be register or power of 2 constant"); 1718 assert(result->is_double_cpu(), "result must be register"); 1719 1720 Register lreg = left->as_register_lo(); 1721 Register dreg = result->as_register_lo(); 1722 1723 if (right->is_constant()) { 1724 // Convert division by a power of two into some shifts and logical operations. 1725 Register treg1 = Z_R0_scratch; 1726 Register treg2 = Z_R1_scratch; 1727 jlong divisor = right->as_jlong(); 1728 jlong log_divisor = log2_long(right->as_jlong()); 1729 1730 if (divisor == min_jlong) { 1731 // Min_jlong is special. Result is '0' except for min_jlong/min_jlong = 1. 1732 if (dreg == lreg) { 1733 NearLabel done; 1734 __ load_const_optimized(treg2, min_jlong); 1735 __ z_cgr(lreg, treg2); 1736 __ z_lghi(dreg, 0); // Preserves condition code. 1737 __ z_brne(done); 1738 __ z_lghi(dreg, 1); // min_jlong / min_jlong = 1 1739 __ bind(done); 1740 } else { 1741 assert_different_registers(dreg, lreg); 1742 NearLabel done; 1743 __ z_lghi(dreg, 0); 1744 __ compare64_and_branch(lreg, min_jlong, Assembler::bcondNotEqual, done); 1745 __ z_lghi(dreg, 1); 1746 __ bind(done); 1747 } 1748 return; 1749 } 1750 __ move_reg_if_needed(dreg, T_LONG, lreg, T_LONG); 1751 if (divisor == 2) { 1752 __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0 1753 } else { 1754 __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0 1755 __ and_imm(treg2, divisor - 1, treg1, true); 1756 } 1757 if (code == lir_idiv) { 1758 __ z_agr(dreg, treg2); 1759 __ z_srag(dreg, dreg, log_divisor); 1760 } else { 1761 assert(code == lir_irem, "check"); 1762 __ z_agr(treg2, dreg); 1763 __ and_imm(treg2, ~(divisor - 1), treg1, true); 1764 __ z_sgr(dreg, treg2); 1765 } 1766 return; 1767 } 1768 1769 // Divisor is not a power of 2 constant. 1770 Register rreg = right->as_register_lo(); 1771 Register treg = temp->as_register_lo(); 1772 assert(right->is_double_cpu(), "right must be register"); 1773 assert(lreg == Z_R11, "see ldivInOpr()"); 1774 assert(rreg != lreg, "right register must not be same as left register"); 1775 assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) || 1776 (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see ldivInOpr(), ldivOutOpr(), lremOutOpr()"); 1777 1778 Register R1 = lreg->predecessor(); 1779 Register R2 = rreg; 1780 assert(code != lir_idiv || lreg==dreg, "see code below"); 1781 if (code == lir_idiv) { 1782 __ z_lcgr(lreg, lreg); 1783 } else { 1784 __ clear_reg(dreg, true, false); 1785 } 1786 NearLabel done; 1787 __ compare64_and_branch(R2, -1, Assembler::bcondEqual, done); 1788 if (code == lir_idiv) { 1789 __ z_lcgr(lreg, lreg); // Revert lcgr above. 1790 } 1791 if (ImplicitDiv0Checks) { 1792 // No debug info because the idiv won't trap. 1793 // Add_debug_info_for_div0 would instantiate another DivByZeroStub, 1794 // which is unnecessary, too. 1795 add_debug_info_for_div0(__ offset(), info); 1796 } 1797 __ z_dsgr(R1, R2); 1798 __ bind(done); 1799 return; 1800 } 1801 1802 // 32 bit integer case 1803 1804 assert(left->is_single_cpu(), "left must be register"); 1805 assert(right->is_single_cpu() || is_power_of_2(right->as_jint()), "right must be register or power of 2 constant"); 1806 assert(result->is_single_cpu(), "result must be register"); 1807 1808 Register lreg = left->as_register(); 1809 Register dreg = result->as_register(); 1810 1811 if (right->is_constant()) { 1812 // Convert division by a power of two into some shifts and logical operations. 1813 Register treg1 = Z_R0_scratch; 1814 Register treg2 = Z_R1_scratch; 1815 jlong divisor = right->as_jint(); 1816 jlong log_divisor = log2_long(right->as_jint()); 1817 __ move_reg_if_needed(dreg, T_LONG, lreg, T_INT); // sign extend 1818 if (divisor == 2) { 1819 __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0 1820 } else { 1821 __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0 1822 __ and_imm(treg2, divisor - 1, treg1, true); 1823 } 1824 if (code == lir_idiv) { 1825 __ z_agr(dreg, treg2); 1826 __ z_srag(dreg, dreg, log_divisor); 1827 } else { 1828 assert(code == lir_irem, "check"); 1829 __ z_agr(treg2, dreg); 1830 __ and_imm(treg2, ~(divisor - 1), treg1, true); 1831 __ z_sgr(dreg, treg2); 1832 } 1833 return; 1834 } 1835 1836 // Divisor is not a power of 2 constant. 1837 Register rreg = right->as_register(); 1838 Register treg = temp->as_register(); 1839 assert(right->is_single_cpu(), "right must be register"); 1840 assert(lreg == Z_R11, "left register must be rax,"); 1841 assert(rreg != lreg, "right register must not be same as left register"); 1842 assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) 1843 || (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see divInOpr(), divOutOpr(), remOutOpr()"); 1844 1845 Register R1 = lreg->predecessor(); 1846 Register R2 = rreg; 1847 __ move_reg_if_needed(lreg, T_LONG, lreg, T_INT); // sign extend 1848 if (ImplicitDiv0Checks) { 1849 // No debug info because the idiv won't trap. 1850 // Add_debug_info_for_div0 would instantiate another DivByZeroStub, 1851 // which is unnecessary, too. 1852 add_debug_info_for_div0(__ offset(), info); 1853 } 1854 __ z_dsgfr(R1, R2); 1855 } 1856 1857 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 1858 assert(exceptionOop->as_register() == Z_EXC_OOP, "should match"); 1859 assert(exceptionPC->as_register() == Z_EXC_PC, "should match"); 1860 1861 // Exception object is not added to oop map by LinearScan 1862 // (LinearScan assumes that no oops are in fixed registers). 1863 info->add_register_oop(exceptionOop); 1864 1865 // Reuse the debug info from the safepoint poll for the throw op itself. 1866 __ get_PC(Z_EXC_PC); 1867 add_call_info(__ offset(), info); // for exception handler 1868 address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? Runtime1::handle_exception_id 1869 : Runtime1::handle_exception_nofpu_id); 1870 emit_call_c(stub); 1871 } 1872 1873 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1874 assert(exceptionOop->as_register() == Z_EXC_OOP, "should match"); 1875 1876 __ branch_optimized(Assembler::bcondAlways, _unwind_handler_entry); 1877 } 1878 1879 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 1880 ciArrayKlass* default_type = op->expected_type(); 1881 Register src = op->src()->as_register(); 1882 Register dst = op->dst()->as_register(); 1883 Register src_pos = op->src_pos()->as_register(); 1884 Register dst_pos = op->dst_pos()->as_register(); 1885 Register length = op->length()->as_register(); 1886 Register tmp = op->tmp()->as_register(); 1887 1888 CodeStub* stub = op->stub(); 1889 int flags = op->flags(); 1890 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 1891 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 1892 1893 // If we don't know anything, just go through the generic arraycopy. 1894 if (default_type == NULL) { 1895 Label done; 1896 // Save outgoing arguments in callee saved registers (C convention) in case 1897 // a call to System.arraycopy is needed. 1898 Register callee_saved_src = Z_R10; 1899 Register callee_saved_src_pos = Z_R11; 1900 Register callee_saved_dst = Z_R12; 1901 Register callee_saved_dst_pos = Z_R13; 1902 Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved. 1903 1904 __ lgr_if_needed(callee_saved_src, src); 1905 __ lgr_if_needed(callee_saved_src_pos, src_pos); 1906 __ lgr_if_needed(callee_saved_dst, dst); 1907 __ lgr_if_needed(callee_saved_dst_pos, dst_pos); 1908 __ lgr_if_needed(callee_saved_length, length); 1909 1910 // C function requires 64 bit values. 1911 __ z_lgfr(src_pos, src_pos); 1912 __ z_lgfr(dst_pos, dst_pos); 1913 __ z_lgfr(length, length); 1914 1915 address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); 1916 1917 address copyfunc_addr = StubRoutines::generic_arraycopy(); 1918 1919 // Pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint. 1920 1921 // The arguments are in the corresponding registers. 1922 assert(Z_ARG1 == src, "assumption"); 1923 assert(Z_ARG2 == src_pos, "assumption"); 1924 assert(Z_ARG3 == dst, "assumption"); 1925 assert(Z_ARG4 == dst_pos, "assumption"); 1926 assert(Z_ARG5 == length, "assumption"); 1927 if (copyfunc_addr == NULL) { // Use C version if stub was not generated. 1928 emit_call_c(C_entry); 1929 } else { 1930 #ifndef PRODUCT 1931 if (PrintC1Statistics) { 1932 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt); 1933 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 1934 } 1935 #endif 1936 emit_call_c(copyfunc_addr); 1937 } 1938 CHECK_BAILOUT(); 1939 1940 __ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation()); 1941 1942 if (copyfunc_addr != NULL) { 1943 __ z_lgr(tmp, Z_RET); 1944 __ z_xilf(tmp, -1); 1945 } 1946 1947 // Restore values from callee saved registers so they are where the stub 1948 // expects them. 1949 __ lgr_if_needed(src, callee_saved_src); 1950 __ lgr_if_needed(src_pos, callee_saved_src_pos); 1951 __ lgr_if_needed(dst, callee_saved_dst); 1952 __ lgr_if_needed(dst_pos, callee_saved_dst_pos); 1953 __ lgr_if_needed(length, callee_saved_length); 1954 1955 if (copyfunc_addr != NULL) { 1956 __ z_sr(length, tmp); 1957 __ z_ar(src_pos, tmp); 1958 __ z_ar(dst_pos, tmp); 1959 } 1960 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 1961 1962 __ bind(*stub->continuation()); 1963 return; 1964 } 1965 1966 assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point"); 1967 1968 int elem_size = type2aelembytes(basic_type); 1969 int shift_amount; 1970 1971 switch (elem_size) { 1972 case 1 : 1973 shift_amount = 0; 1974 break; 1975 case 2 : 1976 shift_amount = 1; 1977 break; 1978 case 4 : 1979 shift_amount = 2; 1980 break; 1981 case 8 : 1982 shift_amount = 3; 1983 break; 1984 default: 1985 shift_amount = -1; 1986 ShouldNotReachHere(); 1987 } 1988 1989 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes()); 1990 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes()); 1991 Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes()); 1992 Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes()); 1993 1994 // Length and pos's are all sign extended at this point on 64bit. 1995 1996 // test for NULL 1997 if (flags & LIR_OpArrayCopy::src_null_check) { 1998 __ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry()); 1999 } 2000 if (flags & LIR_OpArrayCopy::dst_null_check) { 2001 __ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondZero, *stub->entry()); 2002 } 2003 2004 // Check if negative. 2005 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 2006 __ compare32_and_branch(src_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry()); 2007 } 2008 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 2009 __ compare32_and_branch(dst_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry()); 2010 } 2011 2012 // If the compiler was not able to prove that exact type of the source or the destination 2013 // of the arraycopy is an array type, check at runtime if the source or the destination is 2014 // an instance type. 2015 if (flags & LIR_OpArrayCopy::type_check) { 2016 assert(Klass::_lh_neutral_value == 0, "or replace z_lt instructions"); 2017 2018 if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2019 __ load_klass(tmp, dst); 2020 __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2021 __ branch_optimized(Assembler::bcondNotLow, *stub->entry()); 2022 } 2023 2024 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2025 __ load_klass(tmp, src); 2026 __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset()))); 2027 __ branch_optimized(Assembler::bcondNotLow, *stub->entry()); 2028 } 2029 } 2030 2031 if (flags & LIR_OpArrayCopy::src_range_check) { 2032 __ z_la(tmp, Address(src_pos, length)); 2033 __ z_cl(tmp, src_length_addr); 2034 __ branch_optimized(Assembler::bcondHigh, *stub->entry()); 2035 } 2036 if (flags & LIR_OpArrayCopy::dst_range_check) { 2037 __ z_la(tmp, Address(dst_pos, length)); 2038 __ z_cl(tmp, dst_length_addr); 2039 __ branch_optimized(Assembler::bcondHigh, *stub->entry()); 2040 } 2041 2042 if (flags & LIR_OpArrayCopy::length_positive_check) { 2043 __ z_ltr(length, length); 2044 __ branch_optimized(Assembler::bcondNegative, *stub->entry()); 2045 } 2046 2047 // Stubs require 64 bit values. 2048 __ z_lgfr(src_pos, src_pos); // int -> long 2049 __ z_lgfr(dst_pos, dst_pos); // int -> long 2050 __ z_lgfr(length, length); // int -> long 2051 2052 if (flags & LIR_OpArrayCopy::type_check) { 2053 // We don't know the array types are compatible. 2054 if (basic_type != T_OBJECT) { 2055 // Simple test for basic type arrays. 2056 if (UseCompressedClassPointers) { 2057 __ z_l(tmp, src_klass_addr); 2058 __ z_c(tmp, dst_klass_addr); 2059 } else { 2060 __ z_lg(tmp, src_klass_addr); 2061 __ z_cg(tmp, dst_klass_addr); 2062 } 2063 __ branch_optimized(Assembler::bcondNotEqual, *stub->entry()); 2064 } else { 2065 // For object arrays, if src is a sub class of dst then we can 2066 // safely do the copy. 2067 NearLabel cont, slow; 2068 Register src_klass = Z_R1_scratch; 2069 Register dst_klass = Z_R10; 2070 2071 __ load_klass(src_klass, src); 2072 __ load_klass(dst_klass, dst); 2073 2074 __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, NULL); 2075 2076 store_parameter(src_klass, 0); // sub 2077 store_parameter(dst_klass, 1); // super 2078 emit_call_c(Runtime1::entry_for (Runtime1::slow_subtype_check_id)); 2079 CHECK_BAILOUT(); 2080 // Sets condition code 0 for match (2 otherwise). 2081 __ branch_optimized(Assembler::bcondEqual, cont); 2082 2083 __ bind(slow); 2084 2085 address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 2086 if (copyfunc_addr != NULL) { // use stub if available 2087 // Src is not a sub class of dst so we have to do a 2088 // per-element check. 2089 2090 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2091 if ((flags & mask) != mask) { 2092 // Check that at least both of them object arrays. 2093 assert(flags & mask, "one of the two should be known to be an object array"); 2094 2095 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2096 __ load_klass(tmp, src); 2097 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2098 __ load_klass(tmp, dst); 2099 } 2100 Address klass_lh_addr(tmp, Klass::layout_helper_offset()); 2101 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2102 __ load_const_optimized(Z_R1_scratch, objArray_lh); 2103 __ z_c(Z_R1_scratch, klass_lh_addr); 2104 __ branch_optimized(Assembler::bcondNotEqual, *stub->entry()); 2105 } 2106 2107 // Save outgoing arguments in callee saved registers (C convention) in case 2108 // a call to System.arraycopy is needed. 2109 Register callee_saved_src = Z_R10; 2110 Register callee_saved_src_pos = Z_R11; 2111 Register callee_saved_dst = Z_R12; 2112 Register callee_saved_dst_pos = Z_R13; 2113 Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved. 2114 2115 __ lgr_if_needed(callee_saved_src, src); 2116 __ lgr_if_needed(callee_saved_src_pos, src_pos); 2117 __ lgr_if_needed(callee_saved_dst, dst); 2118 __ lgr_if_needed(callee_saved_dst_pos, dst_pos); 2119 __ lgr_if_needed(callee_saved_length, length); 2120 2121 __ z_llgfr(length, length); // Higher 32bits must be null. 2122 2123 __ z_sllg(Z_ARG1, src_pos, shift_amount); // index -> byte offset 2124 __ z_sllg(Z_ARG2, dst_pos, shift_amount); // index -> byte offset 2125 2126 __ z_la(Z_ARG1, Address(src, Z_ARG1, arrayOopDesc::base_offset_in_bytes(basic_type))); 2127 assert_different_registers(Z_ARG1, dst, dst_pos, length); 2128 __ z_la(Z_ARG2, Address(dst, Z_ARG2, arrayOopDesc::base_offset_in_bytes(basic_type))); 2129 assert_different_registers(Z_ARG2, dst, length); 2130 2131 __ z_lgr(Z_ARG3, length); 2132 assert_different_registers(Z_ARG3, dst); 2133 2134 __ load_klass(Z_ARG5, dst); 2135 __ z_lg(Z_ARG5, Address(Z_ARG5, ObjArrayKlass::element_klass_offset())); 2136 __ z_lg(Z_ARG4, Address(Z_ARG5, Klass::super_check_offset_offset())); 2137 emit_call_c(copyfunc_addr); 2138 CHECK_BAILOUT(); 2139 2140 #ifndef PRODUCT 2141 if (PrintC1Statistics) { 2142 NearLabel failed; 2143 __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondNotEqual, failed); 2144 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_cnt); 2145 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2146 __ bind(failed); 2147 } 2148 #endif 2149 2150 __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation()); 2151 2152 #ifndef PRODUCT 2153 if (PrintC1Statistics) { 2154 __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_attempt_cnt); 2155 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2156 } 2157 #endif 2158 2159 __ z_lgr(tmp, Z_RET); 2160 __ z_xilf(tmp, -1); 2161 2162 // Restore previously spilled arguments 2163 __ lgr_if_needed(src, callee_saved_src); 2164 __ lgr_if_needed(src_pos, callee_saved_src_pos); 2165 __ lgr_if_needed(dst, callee_saved_dst); 2166 __ lgr_if_needed(dst_pos, callee_saved_dst_pos); 2167 __ lgr_if_needed(length, callee_saved_length); 2168 2169 __ z_sr(length, tmp); 2170 __ z_ar(src_pos, tmp); 2171 __ z_ar(dst_pos, tmp); 2172 } 2173 2174 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 2175 2176 __ bind(cont); 2177 } 2178 } 2179 2180 #ifdef ASSERT 2181 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2182 // Sanity check the known type with the incoming class. For the 2183 // primitive case the types must match exactly with src.klass and 2184 // dst.klass each exactly matching the default type. For the 2185 // object array case, if no type check is needed then either the 2186 // dst type is exactly the expected type and the src type is a 2187 // subtype which we can't check or src is the same array as dst 2188 // but not necessarily exactly of type default_type. 2189 NearLabel known_ok, halt; 2190 metadata2reg(default_type->constant_encoding(), tmp); 2191 if (UseCompressedClassPointers) { 2192 __ encode_klass_not_null(tmp); 2193 } 2194 2195 if (basic_type != T_OBJECT) { 2196 if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); } 2197 else { __ z_cg(tmp, dst_klass_addr); } 2198 __ branch_optimized(Assembler::bcondNotEqual, halt); 2199 if (UseCompressedClassPointers) { __ z_c (tmp, src_klass_addr); } 2200 else { __ z_cg(tmp, src_klass_addr); } 2201 __ branch_optimized(Assembler::bcondEqual, known_ok); 2202 } else { 2203 if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); } 2204 else { __ z_cg(tmp, dst_klass_addr); } 2205 __ branch_optimized(Assembler::bcondEqual, known_ok); 2206 __ compareU64_and_branch(src, dst, Assembler::bcondEqual, known_ok); 2207 } 2208 __ bind(halt); 2209 __ stop("incorrect type information in arraycopy"); 2210 __ bind(known_ok); 2211 } 2212 #endif 2213 2214 #ifndef PRODUCT 2215 if (PrintC1Statistics) { 2216 __ load_const_optimized(Z_R1_scratch, Runtime1::arraycopy_count_address(basic_type)); 2217 __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch); 2218 } 2219 #endif 2220 2221 __ z_sllg(tmp, src_pos, shift_amount); // index -> byte offset 2222 __ z_sllg(Z_R1_scratch, dst_pos, shift_amount); // index -> byte offset 2223 2224 assert_different_registers(Z_ARG1, dst, dst_pos, length); 2225 __ z_la(Z_ARG1, Address(src, tmp, arrayOopDesc::base_offset_in_bytes(basic_type))); 2226 assert_different_registers(Z_ARG2, length); 2227 __ z_la(Z_ARG2, Address(dst, Z_R1_scratch, arrayOopDesc::base_offset_in_bytes(basic_type))); 2228 __ lgr_if_needed(Z_ARG3, length); 2229 2230 bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0; 2231 bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0; 2232 const char *name; 2233 address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false); 2234 __ call_VM_leaf(entry); 2235 2236 __ bind(*stub->continuation()); 2237 } 2238 2239 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2240 if (dest->is_single_cpu()) { 2241 if (left->type() == T_OBJECT) { 2242 switch (code) { 2243 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2244 case lir_shr: __ z_srag (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2245 case lir_ushr: __ z_srlg (dest->as_register(), left->as_register(), 0, count->as_register()); break; 2246 default: ShouldNotReachHere(); 2247 } 2248 } else { 2249 assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts"); 2250 Register masked_count = Z_R1_scratch; 2251 __ z_lr(masked_count, count->as_register()); 2252 __ z_nill(masked_count, 31); 2253 switch (code) { 2254 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), 0, masked_count); break; 2255 case lir_shr: __ z_sra (dest->as_register(), 0, masked_count); break; 2256 case lir_ushr: __ z_srl (dest->as_register(), 0, masked_count); break; 2257 default: ShouldNotReachHere(); 2258 } 2259 } 2260 } else { 2261 switch (code) { 2262 case lir_shl: __ z_sllg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2263 case lir_shr: __ z_srag (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2264 case lir_ushr: __ z_srlg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break; 2265 default: ShouldNotReachHere(); 2266 } 2267 } 2268 } 2269 2270 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2271 if (left->type() == T_OBJECT) { 2272 count = count & 63; // Shouldn't shift by more than sizeof(intptr_t). 2273 Register l = left->as_register(); 2274 Register d = dest->as_register_lo(); 2275 switch (code) { 2276 case lir_shl: __ z_sllg (d, l, count); break; 2277 case lir_shr: __ z_srag (d, l, count); break; 2278 case lir_ushr: __ z_srlg (d, l, count); break; 2279 default: ShouldNotReachHere(); 2280 } 2281 return; 2282 } 2283 if (dest->is_single_cpu()) { 2284 assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts"); 2285 count = count & 0x1F; // Java spec 2286 switch (code) { 2287 case lir_shl: __ z_sllg (dest->as_register(), left->as_register(), count); break; 2288 case lir_shr: __ z_sra (dest->as_register(), count); break; 2289 case lir_ushr: __ z_srl (dest->as_register(), count); break; 2290 default: ShouldNotReachHere(); 2291 } 2292 } else if (dest->is_double_cpu()) { 2293 count = count & 63; // Java spec 2294 Register l = left->as_pointer_register(); 2295 Register d = dest->as_pointer_register(); 2296 switch (code) { 2297 case lir_shl: __ z_sllg (d, l, count); break; 2298 case lir_shr: __ z_srag (d, l, count); break; 2299 case lir_ushr: __ z_srlg (d, l, count); break; 2300 default: ShouldNotReachHere(); 2301 } 2302 } else { 2303 ShouldNotReachHere(); 2304 } 2305 } 2306 2307 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 2308 if (op->init_check()) { 2309 // Make sure klass is initialized & doesn't have finalizer. 2310 const int state_offset = in_bytes(InstanceKlass::init_state_offset()); 2311 Register iklass = op->klass()->as_register(); 2312 add_debug_info_for_null_check_here(op->stub()->info()); 2313 if (Immediate::is_uimm12(state_offset)) { 2314 __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized); 2315 } else { 2316 __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized); 2317 } 2318 __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far. 2319 } 2320 __ allocate_object(op->obj()->as_register(), 2321 op->tmp1()->as_register(), 2322 op->tmp2()->as_register(), 2323 op->header_size(), 2324 op->object_size(), 2325 op->klass()->as_register(), 2326 *op->stub()->entry()); 2327 __ bind(*op->stub()->continuation()); 2328 __ verify_oop(op->obj()->as_register()); 2329 } 2330 2331 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2332 Register len = op->len()->as_register(); 2333 __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend 2334 2335 if (UseSlowPath || 2336 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || 2337 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { 2338 __ z_brul(*op->stub()->entry()); 2339 } else { 2340 __ allocate_array(op->obj()->as_register(), 2341 op->len()->as_register(), 2342 op->tmp1()->as_register(), 2343 op->tmp2()->as_register(), 2344 arrayOopDesc::header_size(op->type()), 2345 type2aelembytes(op->type()), 2346 op->klass()->as_register(), 2347 *op->stub()->entry()); 2348 } 2349 __ bind(*op->stub()->continuation()); 2350 } 2351 2352 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data, 2353 Register recv, Register tmp1, Label* update_done) { 2354 uint i; 2355 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2356 Label next_test; 2357 // See if the receiver is receiver[n]. 2358 Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 2359 __ z_cg(recv, receiver_addr); 2360 __ z_brne(next_test); 2361 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))); 2362 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2363 __ branch_optimized(Assembler::bcondAlways, *update_done); 2364 __ bind(next_test); 2365 } 2366 2367 // Didn't find receiver; find next empty slot and fill it in. 2368 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2369 Label next_test; 2370 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))); 2371 __ z_ltg(Z_R0_scratch, recv_addr); 2372 __ z_brne(next_test); 2373 __ z_stg(recv, recv_addr); 2374 __ load_const_optimized(tmp1, DataLayout::counter_increment); 2375 __ z_stg(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)), mdo); 2376 __ branch_optimized(Assembler::bcondAlways, *update_done); 2377 __ bind(next_test); 2378 } 2379 } 2380 2381 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2382 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2383 Unimplemented(); 2384 } 2385 2386 void LIR_Assembler::store_parameter(Register r, int param_num) { 2387 assert(param_num >= 0, "invalid num"); 2388 int offset_in_bytes = param_num * BytesPerWord + FrameMap::first_available_sp_in_frame; 2389 assert(offset_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2390 __ z_stg(r, offset_in_bytes, Z_SP); 2391 } 2392 2393 void LIR_Assembler::store_parameter(jint c, int param_num) { 2394 assert(param_num >= 0, "invalid num"); 2395 int offset_in_bytes = param_num * BytesPerWord + FrameMap::first_available_sp_in_frame; 2396 assert(offset_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset"); 2397 __ store_const(Address(Z_SP, offset_in_bytes), c, Z_R1_scratch, true); 2398 } 2399 2400 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2401 // We always need a stub for the failure case. 2402 CodeStub* stub = op->stub(); 2403 Register obj = op->object()->as_register(); 2404 Register k_RInfo = op->tmp1()->as_register(); 2405 Register klass_RInfo = op->tmp2()->as_register(); 2406 Register dst = op->result_opr()->as_register(); 2407 Register Rtmp1 = Z_R1_scratch; 2408 ciKlass* k = op->klass(); 2409 2410 assert(!op->tmp3()->is_valid(), "tmp3's not needed"); 2411 2412 // Check if it needs to be profiled. 2413 ciMethodData* md = NULL; 2414 ciProfileData* data = NULL; 2415 2416 if (op->should_profile()) { 2417 ciMethod* method = op->profiled_method(); 2418 assert(method != NULL, "Should have method"); 2419 int bci = op->profiled_bci(); 2420 md = method->method_data_or_null(); 2421 assert(md != NULL, "Sanity"); 2422 data = md->bci_to_data(bci); 2423 assert(data != NULL, "need data for type check"); 2424 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2425 } 2426 2427 // Temp operands do not overlap with inputs, if this is their last 2428 // use (end of range is exclusive), so a register conflict is possible. 2429 if (obj == k_RInfo) { 2430 k_RInfo = dst; 2431 } else if (obj == klass_RInfo) { 2432 klass_RInfo = dst; 2433 } 2434 assert_different_registers(obj, k_RInfo, klass_RInfo); 2435 2436 if (op->should_profile()) { 2437 NearLabel not_null; 2438 __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondNotEqual, not_null); 2439 // Object is null; update MDO and exit. 2440 Register mdo = klass_RInfo; 2441 metadata2reg(md->constant_encoding(), mdo); 2442 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 2443 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 2444 __ or2mem_8(data_addr, header_bits); 2445 __ branch_optimized(Assembler::bcondAlways, *obj_is_null); 2446 __ bind(not_null); 2447 } else { 2448 __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondEqual, *obj_is_null); 2449 } 2450 2451 NearLabel profile_cast_failure, profile_cast_success; 2452 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure; 2453 Label *success_target = op->should_profile() ? &profile_cast_success : success; 2454 2455 // Patching may screw with our temporaries on sparc, 2456 // so let's do it before loading the class. 2457 if (k->is_loaded()) { 2458 metadata2reg(k->constant_encoding(), k_RInfo); 2459 } else { 2460 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 2461 } 2462 assert(obj != k_RInfo, "must be different"); 2463 2464 __ verify_oop(obj); 2465 2466 // Get object class. 2467 // Not a safepoint as obj null check happens earlier. 2468 if (op->fast_check()) { 2469 if (UseCompressedClassPointers) { 2470 __ load_klass(klass_RInfo, obj); 2471 __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target); 2472 } else { 2473 __ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 2474 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2475 } 2476 // Successful cast, fall through to profile or jump. 2477 } else { 2478 bool need_slow_path = !k->is_loaded() || 2479 ((int) k->super_check_offset() == in_bytes(Klass::secondary_super_cache_offset())); 2480 intptr_t super_check_offset = k->is_loaded() ? k->super_check_offset() : -1L; 2481 __ load_klass(klass_RInfo, obj); 2482 // Perform the fast part of the checking logic. 2483 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, 2484 (need_slow_path ? success_target : NULL), 2485 failure_target, NULL, 2486 RegisterOrConstant(super_check_offset)); 2487 if (need_slow_path) { 2488 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2489 address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id); 2490 store_parameter(klass_RInfo, 0); // sub 2491 store_parameter(k_RInfo, 1); // super 2492 emit_call_c(a); // Sets condition code 0 for match (2 otherwise). 2493 CHECK_BAILOUT(); 2494 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2495 // Fall through to success case. 2496 } 2497 } 2498 2499 if (op->should_profile()) { 2500 Register mdo = klass_RInfo, recv = k_RInfo; 2501 assert_different_registers(obj, mdo, recv); 2502 __ bind(profile_cast_success); 2503 metadata2reg(md->constant_encoding(), mdo); 2504 __ load_klass(recv, obj); 2505 type_profile_helper(mdo, md, data, recv, Rtmp1, success); 2506 __ branch_optimized(Assembler::bcondAlways, *success); 2507 2508 __ bind(profile_cast_failure); 2509 metadata2reg(md->constant_encoding(), mdo); 2510 __ add2mem_64(Address(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())), -(int)DataLayout::counter_increment, Rtmp1); 2511 __ branch_optimized(Assembler::bcondAlways, *failure); 2512 } else { 2513 __ branch_optimized(Assembler::bcondAlways, *success); 2514 } 2515 } 2516 2517 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2518 LIR_Code code = op->code(); 2519 if (code == lir_store_check) { 2520 Register value = op->object()->as_register(); 2521 Register array = op->array()->as_register(); 2522 Register k_RInfo = op->tmp1()->as_register(); 2523 Register klass_RInfo = op->tmp2()->as_register(); 2524 Register Rtmp1 = Z_R1_scratch; 2525 2526 CodeStub* stub = op->stub(); 2527 2528 // Check if it needs to be profiled. 2529 ciMethodData* md = NULL; 2530 ciProfileData* data = NULL; 2531 2532 assert_different_registers(value, k_RInfo, klass_RInfo); 2533 2534 if (op->should_profile()) { 2535 ciMethod* method = op->profiled_method(); 2536 assert(method != NULL, "Should have method"); 2537 int bci = op->profiled_bci(); 2538 md = method->method_data_or_null(); 2539 assert(md != NULL, "Sanity"); 2540 data = md->bci_to_data(bci); 2541 assert(data != NULL, "need data for type check"); 2542 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2543 } 2544 NearLabel profile_cast_success, profile_cast_failure, done; 2545 Label *success_target = op->should_profile() ? &profile_cast_success : &done; 2546 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry(); 2547 2548 if (op->should_profile()) { 2549 NearLabel not_null; 2550 __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondNotEqual, not_null); 2551 // Object is null; update MDO and exit. 2552 Register mdo = klass_RInfo; 2553 metadata2reg(md->constant_encoding(), mdo); 2554 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset())); 2555 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant()); 2556 __ or2mem_8(data_addr, header_bits); 2557 __ branch_optimized(Assembler::bcondAlways, done); 2558 __ bind(not_null); 2559 } else { 2560 __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondEqual, done); 2561 } 2562 2563 add_debug_info_for_null_check_here(op->info_for_exception()); 2564 __ load_klass(k_RInfo, array); 2565 __ load_klass(klass_RInfo, value); 2566 2567 // Get instance klass (it's already uncompressed). 2568 __ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset())); 2569 // Perform the fast part of the checking logic. 2570 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 2571 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2572 address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id); 2573 store_parameter(klass_RInfo, 0); // sub 2574 store_parameter(k_RInfo, 1); // super 2575 emit_call_c(a); // Sets condition code 0 for match (2 otherwise). 2576 CHECK_BAILOUT(); 2577 __ branch_optimized(Assembler::bcondNotEqual, *failure_target); 2578 // Fall through to success case. 2579 2580 if (op->should_profile()) { 2581 Register mdo = klass_RInfo, recv = k_RInfo; 2582 assert_different_registers(value, mdo, recv); 2583 __ bind(profile_cast_success); 2584 metadata2reg(md->constant_encoding(), mdo); 2585 __ load_klass(recv, value); 2586 type_profile_helper(mdo, md, data, recv, Rtmp1, &done); 2587 __ branch_optimized(Assembler::bcondAlways, done); 2588 2589 __ bind(profile_cast_failure); 2590 metadata2reg(md->constant_encoding(), mdo); 2591 __ add2mem_64(Address(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())), -(int)DataLayout::counter_increment, Rtmp1); 2592 __ branch_optimized(Assembler::bcondAlways, *stub->entry()); 2593 } 2594 2595 __ bind(done); 2596 } else { 2597 if (code == lir_checkcast) { 2598 Register obj = op->object()->as_register(); 2599 Register dst = op->result_opr()->as_register(); 2600 NearLabel success; 2601 emit_typecheck_helper(op, &success, op->stub()->entry(), &success); 2602 __ bind(success); 2603 __ lgr_if_needed(dst, obj); 2604 } else { 2605 if (code == lir_instanceof) { 2606 Register obj = op->object()->as_register(); 2607 Register dst = op->result_opr()->as_register(); 2608 NearLabel success, failure, done; 2609 emit_typecheck_helper(op, &success, &failure, &failure); 2610 __ bind(failure); 2611 __ clear_reg(dst); 2612 __ branch_optimized(Assembler::bcondAlways, done); 2613 __ bind(success); 2614 __ load_const_optimized(dst, 1); 2615 __ bind(done); 2616 } else { 2617 ShouldNotReachHere(); 2618 } 2619 } 2620 } 2621 } 2622 2623 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2624 Register addr = op->addr()->as_pointer_register(); 2625 Register t1_cmp = Z_R1_scratch; 2626 if (op->code() == lir_cas_long) { 2627 assert(VM_Version::supports_cx8(), "wrong machine"); 2628 Register cmp_value_lo = op->cmp_value()->as_register_lo(); 2629 Register new_value_lo = op->new_value()->as_register_lo(); 2630 __ z_lgr(t1_cmp, cmp_value_lo); 2631 // Perform the compare and swap operation. 2632 __ z_csg(t1_cmp, new_value_lo, 0, addr); 2633 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2634 Register cmp_value = op->cmp_value()->as_register(); 2635 Register new_value = op->new_value()->as_register(); 2636 if (op->code() == lir_cas_obj) { 2637 if (UseCompressedOops) { 2638 t1_cmp = op->tmp1()->as_register(); 2639 Register t2_new = op->tmp2()->as_register(); 2640 assert_different_registers(cmp_value, new_value, addr, t1_cmp, t2_new); 2641 __ oop_encoder(t1_cmp, cmp_value, true /*maybe null*/); 2642 __ oop_encoder(t2_new, new_value, true /*maybe null*/); 2643 __ z_cs(t1_cmp, t2_new, 0, addr); 2644 } else { 2645 __ z_lgr(t1_cmp, cmp_value); 2646 __ z_csg(t1_cmp, new_value, 0, addr); 2647 } 2648 } else { 2649 __ z_lr(t1_cmp, cmp_value); 2650 __ z_cs(t1_cmp, new_value, 0, addr); 2651 } 2652 } else { 2653 ShouldNotReachHere(); // new lir_cas_?? 2654 } 2655 } 2656 2657 void LIR_Assembler::set_24bit_FPU() { 2658 ShouldNotCallThis(); // x86 only 2659 } 2660 2661 void LIR_Assembler::reset_FPU() { 2662 ShouldNotCallThis(); // x86 only 2663 } 2664 2665 void LIR_Assembler::breakpoint() { 2666 Unimplemented(); 2667 // __ breakpoint_trap(); 2668 } 2669 2670 void LIR_Assembler::push(LIR_Opr opr) { 2671 ShouldNotCallThis(); // unused 2672 } 2673 2674 void LIR_Assembler::pop(LIR_Opr opr) { 2675 ShouldNotCallThis(); // unused 2676 } 2677 2678 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2679 Address addr = frame_map()->address_for_monitor_lock(monitor_no); 2680 __ add2reg(dst_opr->as_register(), addr.disp(), addr.base()); 2681 } 2682 2683 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2684 Register obj = op->obj_opr()->as_register(); // May not be an oop. 2685 Register hdr = op->hdr_opr()->as_register(); 2686 Register lock = op->lock_opr()->as_register(); 2687 if (!UseFastLocking) { 2688 __ branch_optimized(Assembler::bcondAlways, *op->stub()->entry()); 2689 } else if (op->code() == lir_lock) { 2690 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2691 // Add debug info for NullPointerException only if one is possible. 2692 if (op->info() != NULL) { 2693 add_debug_info_for_null_check_here(op->info()); 2694 } 2695 __ lock_object(hdr, obj, lock, *op->stub()->entry()); 2696 // done 2697 } else if (op->code() == lir_unlock) { 2698 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2699 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2700 } else { 2701 ShouldNotReachHere(); 2702 } 2703 __ bind(*op->stub()->continuation()); 2704 } 2705 2706 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2707 ciMethod* method = op->profiled_method(); 2708 int bci = op->profiled_bci(); 2709 ciMethod* callee = op->profiled_callee(); 2710 2711 // Update counter for all call types. 2712 ciMethodData* md = method->method_data_or_null(); 2713 assert(md != NULL, "Sanity"); 2714 ciProfileData* data = md->bci_to_data(bci); 2715 assert(data->is_CounterData(), "need CounterData for calls"); 2716 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2717 Register mdo = op->mdo()->as_register(); 2718 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2719 Register tmp1 = op->tmp1()->as_register_lo(); 2720 metadata2reg(md->constant_encoding(), mdo); 2721 2722 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())); 2723 Bytecodes::Code bc = method->java_code_at_bci(bci); 2724 const bool callee_is_static = callee->is_loaded() && callee->is_static(); 2725 // Perform additional virtual call profiling for invokevirtual and 2726 // invokeinterface bytecodes. 2727 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 2728 !callee_is_static && // Required for optimized MH invokes. 2729 C1ProfileVirtualCalls) { 2730 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2731 Register recv = op->recv()->as_register(); 2732 assert_different_registers(mdo, tmp1, recv); 2733 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2734 ciKlass* known_klass = op->known_holder(); 2735 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 2736 // We know the type that will be seen at this call site; we can 2737 // statically update the MethodData* rather than needing to do 2738 // dynamic tests on the receiver type. 2739 2740 // NOTE: we should probably put a lock around this search to 2741 // avoid collisions by concurrent compilations. 2742 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2743 uint i; 2744 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2745 ciKlass* receiver = vc_data->receiver(i); 2746 if (known_klass->equals(receiver)) { 2747 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2748 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2749 return; 2750 } 2751 } 2752 2753 // Receiver type not found in profile data. Select an empty slot. 2754 2755 // Note that this is less efficient than it should be because it 2756 // always does a write to the receiver part of the 2757 // VirtualCallData rather than just the first time. 2758 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2759 ciKlass* receiver = vc_data->receiver(i); 2760 if (receiver == NULL) { 2761 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))); 2762 metadata2reg(known_klass->constant_encoding(), tmp1); 2763 __ z_stg(tmp1, recv_addr); 2764 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))); 2765 __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1); 2766 return; 2767 } 2768 } 2769 } else { 2770 __ load_klass(recv, recv); 2771 NearLabel update_done; 2772 type_profile_helper(mdo, md, data, recv, tmp1, &update_done); 2773 // Receiver did not match any saved receiver and there is no empty row for it. 2774 // Increment total counter to indicate polymorphic case. 2775 __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1); 2776 __ bind(update_done); 2777 } 2778 } else { 2779 // static call 2780 __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1); 2781 } 2782 } 2783 2784 void LIR_Assembler::align_backward_branch_target() { 2785 __ align(OptoLoopAlignment); 2786 } 2787 2788 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { 2789 ShouldNotCallThis(); // There are no delay slots on ZARCH_64. 2790 } 2791 2792 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { 2793 assert(left->is_register(), "can only handle registers"); 2794 2795 if (left->is_single_cpu()) { 2796 __ z_lcr(dest->as_register(), left->as_register()); 2797 } else if (left->is_single_fpu()) { 2798 __ z_lcebr(dest->as_float_reg(), left->as_float_reg()); 2799 } else if (left->is_double_fpu()) { 2800 __ z_lcdbr(dest->as_double_reg(), left->as_double_reg()); 2801 } else { 2802 assert(left->is_double_cpu(), "Must be a long"); 2803 __ z_lcgr(dest->as_register_lo(), left->as_register_lo()); 2804 } 2805 } 2806 2807 void LIR_Assembler::fxch(int i) { 2808 ShouldNotCallThis(); // x86 only 2809 } 2810 2811 void LIR_Assembler::fld(int i) { 2812 ShouldNotCallThis(); // x86 only 2813 } 2814 2815 void LIR_Assembler::ffree(int i) { 2816 ShouldNotCallThis(); // x86 only 2817 } 2818 2819 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 2820 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2821 assert(!tmp->is_valid(), "don't need temporary"); 2822 emit_call_c(dest); 2823 CHECK_BAILOUT(); 2824 if (info != NULL) { 2825 add_call_info_here(info); 2826 } 2827 } 2828 2829 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2830 ShouldNotCallThis(); // not needed on ZARCH_64 2831 } 2832 2833 void LIR_Assembler::membar() { 2834 __ z_fence(); 2835 } 2836 2837 void LIR_Assembler::membar_acquire() { 2838 __ z_acquire(); 2839 } 2840 2841 void LIR_Assembler::membar_release() { 2842 __ z_release(); 2843 } 2844 2845 void LIR_Assembler::membar_loadload() { 2846 __ z_acquire(); 2847 } 2848 2849 void LIR_Assembler::membar_storestore() { 2850 __ z_release(); 2851 } 2852 2853 void LIR_Assembler::membar_loadstore() { 2854 __ z_acquire(); 2855 } 2856 2857 void LIR_Assembler::membar_storeload() { 2858 __ z_fence(); 2859 } 2860 2861 void LIR_Assembler::on_spin_wait() { 2862 Unimplemented(); 2863 } 2864 2865 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { 2866 LIR_Address* addr = addr_opr->as_address_ptr(); 2867 assert(addr->scale() == LIR_Address::times_1, "scaling unsupported"); 2868 __ load_address(dest->as_pointer_register(), as_Address(addr)); 2869 } 2870 2871 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2872 ShouldNotCallThis(); // unused 2873 } 2874 2875 #ifdef ASSERT 2876 // Emit run-time assertion. 2877 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2878 Unimplemented(); 2879 } 2880 #endif 2881 2882 void LIR_Assembler::peephole(LIR_List*) { 2883 // Do nothing for now. 2884 } 2885 2886 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 2887 assert(code == lir_xadd, "lir_xchg not supported"); 2888 Address src_addr = as_Address(src->as_address_ptr()); 2889 Register base = src_addr.base(); 2890 intptr_t disp = src_addr.disp(); 2891 if (src_addr.index()->is_valid()) { 2892 // LAA and LAAG do not support index register. 2893 __ load_address(Z_R1_scratch, src_addr); 2894 base = Z_R1_scratch; 2895 disp = 0; 2896 } 2897 if (data->type() == T_INT) { 2898 __ z_laa(dest->as_register(), data->as_register(), disp, base); 2899 } else if (data->type() == T_LONG) { 2900 assert(data->as_register_lo() == data->as_register_hi(), "should be a single register"); 2901 __ z_laag(dest->as_register_lo(), data->as_register_lo(), disp, base); 2902 } else { 2903 ShouldNotReachHere(); 2904 } 2905 } 2906 2907 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) { 2908 Register obj = op->obj()->as_register(); 2909 Register tmp1 = op->tmp()->as_pointer_register(); 2910 Register tmp2 = Z_R1_scratch; 2911 Address mdo_addr = as_Address(op->mdp()->as_address_ptr()); 2912 ciKlass* exact_klass = op->exact_klass(); 2913 intptr_t current_klass = op->current_klass(); 2914 bool not_null = op->not_null(); 2915 bool no_conflict = op->no_conflict(); 2916 2917 Label update, next, none, null_seen, init_klass; 2918 2919 bool do_null = !not_null; 2920 bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass; 2921 bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set; 2922 2923 assert(do_null || do_update, "why are we here?"); 2924 assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?"); 2925 2926 __ verify_oop(obj); 2927 2928 if (do_null || tmp1 != obj DEBUG_ONLY(|| true)) { 2929 __ z_ltgr(tmp1, obj); 2930 } 2931 if (do_null) { 2932 __ z_brnz(update); 2933 if (!TypeEntries::was_null_seen(current_klass)) { 2934 __ z_lg(tmp1, mdo_addr); 2935 __ z_oill(tmp1, TypeEntries::null_seen); 2936 __ z_stg(tmp1, mdo_addr); 2937 } 2938 if (do_update) { 2939 __ z_bru(next); 2940 } 2941 } else { 2942 __ asm_assert_ne("unexpect null obj", __LINE__); 2943 } 2944 2945 __ bind(update); 2946 2947 if (do_update) { 2948 #ifdef ASSERT 2949 if (exact_klass != NULL) { 2950 __ load_klass(tmp1, tmp1); 2951 metadata2reg(exact_klass->constant_encoding(), tmp2); 2952 __ z_cgr(tmp1, tmp2); 2953 __ asm_assert_eq("exact klass and actual klass differ", __LINE__); 2954 } 2955 #endif 2956 2957 Label do_update; 2958 __ z_lg(tmp2, mdo_addr); 2959 2960 if (!no_conflict) { 2961 if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) { 2962 if (exact_klass != NULL) { 2963 metadata2reg(exact_klass->constant_encoding(), tmp1); 2964 } else { 2965 __ load_klass(tmp1, tmp1); 2966 } 2967 2968 // Klass seen before: nothing to do (regardless of unknown bit). 2969 __ z_lgr(Z_R0_scratch, tmp2); 2970 assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction"); 2971 __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF); 2972 __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next); 2973 2974 // Already unknown: Nothing to do anymore. 2975 __ z_tmll(tmp2, TypeEntries::type_unknown); 2976 __ z_brc(Assembler::bcondAllOne, next); 2977 2978 if (TypeEntries::is_type_none(current_klass)) { 2979 __ z_lgr(Z_R0_scratch, tmp2); 2980 assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction"); 2981 __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF); 2982 __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass); 2983 } 2984 } else { 2985 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 2986 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 2987 2988 // Already unknown: Nothing to do anymore. 2989 __ z_tmll(tmp2, TypeEntries::type_unknown); 2990 __ z_brc(Assembler::bcondAllOne, next); 2991 } 2992 2993 // Different than before. Cannot keep accurate profile. 2994 __ z_oill(tmp2, TypeEntries::type_unknown); 2995 __ z_bru(do_update); 2996 } else { 2997 // There's a single possible klass at this profile point. 2998 assert(exact_klass != NULL, "should be"); 2999 if (TypeEntries::is_type_none(current_klass)) { 3000 metadata2reg(exact_klass->constant_encoding(), tmp1); 3001 __ z_lgr(Z_R0_scratch, tmp2); 3002 assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction"); 3003 __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF); 3004 __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next); 3005 #ifdef ASSERT 3006 { 3007 Label ok; 3008 __ z_lgr(Z_R0_scratch, tmp2); 3009 assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction"); 3010 __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF); 3011 __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, ok); 3012 __ stop("unexpected profiling mismatch"); 3013 __ bind(ok); 3014 } 3015 #endif 3016 3017 } else { 3018 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3019 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3020 3021 // Already unknown: Nothing to do anymore. 3022 __ z_tmll(tmp2, TypeEntries::type_unknown); 3023 __ z_brc(Assembler::bcondAllOne, next); 3024 __ z_oill(tmp2, TypeEntries::type_unknown); 3025 __ z_bru(do_update); 3026 } 3027 } 3028 3029 __ bind(init_klass); 3030 // Combine klass and null_seen bit (only used if (tmp & type_mask)==0). 3031 __ z_ogr(tmp2, tmp1); 3032 3033 __ bind(do_update); 3034 __ z_stg(tmp2, mdo_addr); 3035 3036 __ bind(next); 3037 } 3038 } 3039 3040 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3041 assert(op->crc()->is_single_cpu(), "crc must be register"); 3042 assert(op->val()->is_single_cpu(), "byte value must be register"); 3043 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3044 Register crc = op->crc()->as_register(); 3045 Register val = op->val()->as_register(); 3046 Register res = op->result_opr()->as_register(); 3047 3048 assert_different_registers(val, crc, res); 3049 3050 __ load_const_optimized(res, StubRoutines::crc_table_addr()); 3051 __ kernel_crc32_singleByteReg(crc, val, res, true); 3052 __ z_lgfr(res, crc); 3053 } 3054 3055 #undef __