/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_s390.inline.hpp"

#define __ _masm->

#ifndef PRODUCT
#undef __
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE), _masm) : _masm)->
#endif

//------------------------------------------------------------

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  // Not used on ZARCH_64.
  ShouldNotCallThis();
  return false;
}

LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::Z_R2_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::Z_R2_opr;
}

int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}

// Inline cache check: done before the frame is built.
// The inline cached class is in Z_inline_cache (Z_R9).
// We fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
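// (The compare and the conditional jump are emitted by
// MacroAssembler::inline_cache_check; on a mismatch control goes to the
// runtime's ic-miss stub, which re-resolves the call site. Rough sketch of
// the contract only; see the macro assembler for the exact sequence.)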
int LIR_Assembler::check_icache() {
  Register receiver = receiverOpr()->as_register();
  int offset = __ offset();
  __ inline_cache_check(receiver, Z_inline_cache);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  assert(!method->holder()->is_not_initialized(), "initialization should have been started");

  Label L_skip_barrier;
  Register klass = Z_R1_scratch;

  metadata2reg(method->holder()->constant_encoding(), klass);
  __ clinit_barrier(klass, Z_thread, &L_skip_barrier /*L_fast_path*/);

  __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub());
  __ z_br(klass);

  __ bind(L_skip_barrier);
}

void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //  1. Create a new compiled activation.
  //  2. Initialize local variables in the compiled activation. The expression
  //     stack must be empty at the osr_bci; it is not initialized.
  //  3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   Z_R2 (osrBufferPointer): pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
      // Verify the interpreter's monitor has a non-null object.
      __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is NULL", __LINE__);
      // Copy the lock field into the compiled activation.
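      // (Entry layout in the OSR buffer, as packed by
      // SharedRuntime::OSR_migration_begin() per the comment above:
      // the BasicLock word at slot_offset + 0, the locked oop at
      // slot_offset + BytesPerWord; both are copied into this frame's
      // monitor area below.)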
      __ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf);
      __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i));
      __ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf);
      __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i));
    }
  }
}

// --------------------------------------------------------------------------------------------

address LIR_Assembler::emit_call_c(address a) {
  __ align_call_far_patchable(__ pc());
  address call_addr = __ call_c_opt(a);
  if (call_addr == NULL) {
    bailout("const section overflow");
  }
  return call_addr;
}

int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci. => Add a nop.
  // (was bug 5/14/1999 - gri)
  __ nop();

  // Generate code for exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  address a = Runtime1::entry_for (Runtime1::handle_exception_from_callee_id);
  address call_addr = emit_call_c(a);
  CHECK_BAILOUT_(-1);
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();
  Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved.
  Register Rtmp1 = Z_R11;
  Register Rtmp2 = Z_R12;

  // Fetch the exception from TLS and clear out exception related thread state.
  Address exc_oop_addr = Address(Z_thread, JavaThread::exception_oop_offset());
  Address exc_pc_addr  = Address(Z_thread, JavaThread::exception_pc_offset());
  __ z_lg(Z_EXC_OOP, exc_oop_addr);
  __ clear_mem(exc_oop_addr, sizeof(oop));
  __ clear_mem(exc_pc_addr, sizeof(intptr_t));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Z_EXC_OOP);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP); // Preserve the exception.
  }

  // Perform needed unlocking.
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    // Runtime1::monitorexit_id expects lock address in Z_R1_scratch.
    LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch);
    monitor_address(0, lock);
    stub = new MonitorExitStub(lock, true, 0);
    __ unlock_object(Rtmp1, Rtmp2, lock->as_register(), *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    ShouldNotReachHere(); // Not supported.
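    // (The disabled block below is x86 code, note the rdi/rsi/r15_thread
    // register names; it is compiled out and apparently kept only as a
    // reference for what a dtrace method-exit probe would look like.)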
#if 0
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
#endif
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved); // Restore the exception.
  }

  // Remove the activation and dispatch to the unwind handler.
  __ pop_frame();
  __ z_lg(Z_EXC_PC, _z_abi16(return_pc), Z_SP);

  // Z_EXC_OOP: exception oop
  // Z_EXC_PC:  exception pc

  // Dispatch to the unwind logic.
  __ load_const_optimized(Z_R5, Runtime1::entry_for (Runtime1::unwind_exception_id));
  __ z_br(Z_R5);

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}

int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci. => Add a nop.
  // (was bug 5/14/1999 - gri)
  __ nop();

  // Generate code for deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  // Size must be constant (see HandlerImpl::emit_deopt_handler).
  __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
  __ call(Z_R1_scratch);
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ clear_reg(reg, true /*64bit*/, false /*set cc*/); // Must not kill cc set by cmove.
  } else {
    AddressLiteral a = __ allocate_oop_address(o);
    bool success = __ load_oop_from_toc(reg, a, reg);
    if (!success) {
      bailout("const section overflow");
    }
  }
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // The NULL will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
  __ load_const(reg, addrlit);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::metadata2reg(Metadata* md, Register reg) {
  bool success = __ set_metadata_constant(md, reg);
  if (!success) {
    bailout("const section overflow");
    return;
  }
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
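  // (Same patching scheme as jobject2reg_with_patching above: load_const
  // emits the full-size, non-optimized sequence so the PatchingStub can
  // later overwrite the constant in place once the klass is resolved.)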
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // The NULL will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
  __ load_const(reg, addrlit);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad: {
      const FloatRegister opr1 = op->in_opr1()->as_double_reg(),
                          opr2 = op->in_opr2()->as_double_reg(),
                          opr3 = op->in_opr3()->as_double_reg(),
                          res  = op->result_opr()->as_double_reg();
      __ z_madbr(opr3, opr1, opr2);
      if (res != opr3) { __ z_ldr(res, opr3); }
    } break;
    case lir_fmaf: {
      const FloatRegister opr1 = op->in_opr1()->as_float_reg(),
                          opr2 = op->in_opr2()->as_float_reg(),
                          opr3 = op->in_opr3()->as_float_reg(),
                          res  = op->result_opr()->as_float_reg();
      __ z_maebr(opr3, opr1, opr2);
      if (res != opr3) { __ z_ler(res, opr3); }
    } break;
    default: ShouldNotReachHere(); break;
  }
}

void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  { _branch_target_blocks.append(op->block()); }
  if (op->ublock() != NULL) { _branch_target_blocks.append(op->ublock()); }
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) { add_debug_info_for_branch(op->info()); }
    __ branch_optimized(Assembler::bcondAlways, *(op->label()));
  } else {
    Assembler::branch_condition acond = Assembler::bcondZero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      __ branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label()));
    }
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::bcondEqual;    break;
      case lir_cond_notEqual:     acond = Assembler::bcondNotEqual; break;
      case lir_cond_less:         acond = Assembler::bcondLow;      break;
      case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;  break;
      case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;   break;
      case lir_cond_greater:      acond = Assembler::bcondHigh;     break;
      case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;  break;
      case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;   break;
      default:                    ShouldNotReachHere();
    }
    __ branch_optimized(acond, *(op->label()));
  }
}

void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      __ move_reg_if_needed(dest->as_register_lo(), T_LONG, src->as_register(), T_INT);
      break;

    case Bytecodes::_l2i:
      __ move_reg_if_needed(dest->as_register(), T_INT, src->as_register_lo(), T_LONG);
      break;

    case Bytecodes::_i2b:
      __ move_reg_if_needed(dest->as_register(), T_BYTE, src->as_register(), T_INT);
      break;

    case Bytecodes::_i2c:
      __ move_reg_if_needed(dest->as_register(), T_CHAR, src->as_register(), T_INT);
      break;

    case Bytecodes::_i2s:
      __ move_reg_if_needed(dest->as_register(), T_SHORT, src->as_register(), T_INT);
      break;

    case Bytecodes::_f2d:
      assert(dest->is_double_fpu(), "check");
      __ move_freg_if_needed(dest->as_double_reg(), T_DOUBLE, src->as_float_reg(), T_FLOAT);
      break;

    case Bytecodes::_d2f:
      assert(dest->is_single_fpu(), "check");
      __ move_freg_if_needed(dest->as_float_reg(), T_FLOAT, src->as_double_reg(), T_DOUBLE);
      break;

    case Bytecodes::_i2f:
      __ z_cefbr(dest->as_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ z_cdfbr(dest->as_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ z_cegbr(dest->as_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ z_cdgbr(dest->as_double_reg(), src->as_register_lo());
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_f2l: {
      Label done;
      FloatRegister Rsrc = src->as_float_reg();
      Register Rdst = (op->bytecode() == Bytecodes::_f2i ? dest->as_register() : dest->as_register_lo());
      __ clear_reg(Rdst, true, false);
      __ z_cebr(Rsrc, Rsrc);
      __ z_brno(done); // NaN -> 0
      if (op->bytecode() == Bytecodes::_f2i) {
        __ z_cfebr(Rdst, Rsrc, Assembler::to_zero);
      } else { // op->bytecode() == Bytecodes::_f2l
        __ z_cgebr(Rdst, Rsrc, Assembler::to_zero);
      }
      __ bind(done);
    }
    break;

    case Bytecodes::_d2i:
    case Bytecodes::_d2l: {
      Label done;
      FloatRegister Rsrc = src->as_double_reg();
      Register Rdst = (op->bytecode() == Bytecodes::_d2i ? dest->as_register() : dest->as_register_lo());
      __ clear_reg(Rdst, true, false); // Don't set CC.
      __ z_cdbr(Rsrc, Rsrc);
      __ z_brno(done); // NaN -> 0
      if (op->bytecode() == Bytecodes::_d2i) {
        __ z_cfdbr(Rdst, Rsrc, Assembler::to_zero);
      } else { // Bytecodes::_d2l
        __ z_cgdbr(Rdst, Rsrc, Assembler::to_zero);
      }
      __ bind(done);
    }
    break;

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::align_call(LIR_Code code) {
  // End of call instruction must be 4 byte aligned.
  int offset = __ offset();
  switch (code) {
    case lir_icvirtual_call:
      offset += MacroAssembler::load_const_from_toc_size();
      // no break
    case lir_static_call:
    case lir_optvirtual_call:
    case lir_dynamic_call:
      offset += NativeCall::call_far_pcrelative_displacement_offset;
      break;
    case lir_virtual_call: // currently, sparc-specific for niagara
    default: ShouldNotReachHere();
  }
  if ((offset & (NativeCall::call_far_pcrelative_displacement_alignment - 1)) != 0) {
    __ nop();
  }
}

void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned (offset=%d)", __ offset());
  assert(rtype == relocInfo::none ||
         rtype == relocInfo::opt_virtual_call_type ||
         rtype == relocInfo::static_call_type, "unexpected rtype");
  // Prepend each BRASL with a nop.
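  // (Together with align_call() above, the nop places the BRASL so that its
  // 4-byte pc-relative displacement field is naturally aligned; presumably
  // this is what allows the call target to be patched atomically at runtime.)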
  __ relocate(rtype);
  __ z_nop();
  __ z_brasl(Z_R14, op->addr());
  add_call_info(code_offset(), op->info());
}

void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address virtual_call_oop_addr = NULL;
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  virtual_call_oop_addr = __ pc();
  bool success = __ load_const_from_toc(Z_inline_cache, empty_ic);
  if (!success) {
    bailout("const section overflow");
    return;
  }

  // CALL to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
  call(op, relocInfo::none);
}

// not supported
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}

void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ z_lgr(to_reg, from_reg);
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  unsigned int lmem = 0;
  unsigned int lcon = 0;
  int64_t cbits = 0;
  Address dest_addr;
  switch (c->type()) {
    case T_INT: // fall through
    case T_FLOAT:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      lmem = 4; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_ADDRESS:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      lmem = 8; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_OBJECT:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      if (c->as_jobject() == NULL) {
        __ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8);
      } else {
        jobject2reg(c->as_jobject(), Z_R1_scratch);
        __ reg2mem_opt(Z_R1_scratch, dest_addr, true);
      }
      return;

    case T_LONG: // fall through
    case T_DOUBLE:
      dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
      lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
      break;

    default:
      ShouldNotReachHere();
  }

  __ store_const(dest_addr, cbits, lmem, lcon);
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");

  LIR_Const* c = src->as_constant_ptr();
  Address addr = as_Address(dest->as_address_ptr());

  int store_offset = -1;

  if (dest->as_address_ptr()->index()->is_valid()) {
    switch (type) {
      case T_INT: // fall through
      case T_FLOAT:
        __ load_const_optimized(Z_R0_scratch, c->as_jint_bits());
        store_offset = __ offset();
        if (Immediate::is_uimm12(addr.disp())) {
          __ z_st(Z_R0_scratch, addr);
        } else {
          __ z_sty(Z_R0_scratch, addr);
        }
        break;

      case T_ADDRESS:
        __ load_const_optimized(Z_R1_scratch, c->as_jint_bits());
        store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
        break;

      case T_OBJECT: // fall through
      case T_ARRAY:
        if (c->as_jobject() == NULL) {
          if (UseCompressedOops && !wide) {
            __ clear_reg(Z_R1_scratch, false);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
          } else {
            __ clear_reg(Z_R1_scratch, true);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
          }
        } else {
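          // Non-NULL constant oop: materialize it via the TOC first, then
          // compress it if narrow oops are in use and the store is not wide.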
          jobject2reg(c->as_jobject(), Z_R1_scratch);
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(Z_R1_scratch);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
          } else {
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
          }
        }
        assert(store_offset >= 0, "check");
        break;

      case T_LONG: // fall through
      case T_DOUBLE:
        __ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits()));
        store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
        break;

      case T_BOOLEAN: // fall through
      case T_BYTE:
        __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
        store_offset = __ offset();
        if (Immediate::is_uimm12(addr.disp())) {
          __ z_stc(Z_R0_scratch, addr);
        } else {
          __ z_stcy(Z_R0_scratch, addr);
        }
        break;

      case T_CHAR: // fall through
      case T_SHORT:
        __ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint()));
        store_offset = __ offset();
        if (Immediate::is_uimm12(addr.disp())) {
          __ z_sth(Z_R0_scratch, addr);
        } else {
          __ z_sthy(Z_R0_scratch, addr);
        }
        break;

      default:
        ShouldNotReachHere();
    }

  } else { // no index

    unsigned int lmem = 0;
    unsigned int lcon = 0;
    int64_t cbits = 0;

    switch (type) {
      case T_INT: // fall through
      case T_FLOAT:
        lmem = 4; lcon = 4; cbits = c->as_jint_bits();
        break;

      case T_ADDRESS:
        lmem = 8; lcon = 4; cbits = c->as_jint_bits();
        break;

      case T_OBJECT: // fall through
      case T_ARRAY:
        if (c->as_jobject() == NULL) {
          if (UseCompressedOops && !wide) {
            store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
          } else {
            store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8);
          }
        } else {
          jobject2reg(c->as_jobject(), Z_R1_scratch);
          if (UseCompressedOops && !wide) {
            __ encode_heap_oop(Z_R1_scratch);
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
          } else {
            store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
          }
        }
        assert(store_offset >= 0, "check");
        break;

      case T_LONG: // fall through
      case T_DOUBLE:
        lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
        break;

      case T_BOOLEAN: // fall through
      case T_BYTE:
        lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint());
        break;

      case T_CHAR: // fall through
      case T_SHORT:
        lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint());
        break;

      default:
        ShouldNotReachHere();
    }

    if (store_offset == -1) {
      store_offset = __ store_const(addr, cbits, lmem, lcon);
      assert(store_offset >= 0, "check");
    }
  }

  if (info != NULL) {
    add_debug_info_for_null_check(store_offset, info);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        jobject2reg(c->as_jobject(), dest->as_register());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        metadata2reg(c->as_metadata(), dest->as_register());
      }
      break;
    }

    case T_FLOAT: {
      Register toc_reg = Z_R1_scratch;
      __ load_toc(toc_reg);
      address const_addr = __ float_constant(c->as_jfloat());
      if (const_addr == NULL) {
        bailout("const section overflow");
        break;
      }
      int displ = const_addr - _masm->code()->consts()->start();
      if (dest->is_single_fpu()) {
        __ z_ley(dest->as_float_reg(), displ, toc_reg);
      } else {
        assert(dest->is_single_cpu(), "Must be a cpu register.");
        __ z_ly(dest->as_register(), displ, toc_reg);
      }
    }
    break;

    case T_DOUBLE: {
      Register toc_reg = Z_R1_scratch;
      __ load_toc(toc_reg);
      address const_addr = __ double_constant(c->as_jdouble());
      if (const_addr == NULL) {
        bailout("const section overflow");
        break;
      }
      int displ = const_addr - _masm->code()->consts()->start();
      if (dest->is_double_fpu()) {
        __ z_ldy(dest->as_double_reg(), displ, toc_reg);
      } else {
        assert(dest->is_double_cpu(), "Must be a long register.");
        __ z_lg(dest->as_register_lo(), displ, toc_reg);
      }
    }
    break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  if (addr->base()->is_illegal()) {
    Unimplemented();
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address(base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = addr->index()->as_constant_ptr()->as_jint() + addr->disp();
    return Address(base, addr_offset);
  } else {
    ShouldNotReachHere();
    return Address();
  }
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ mem2reg_opt(tmp, from, false);
      __ reg2mem_opt(tmp, to, false);
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ mem2reg_opt(tmp, from, true);
      __ reg2mem_opt(tmp, to, true);
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ mem2reg_opt(tmp, from, true);
      __ reg2mem_opt(tmp, to, true);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

// 4-byte accesses only! Don't use it to access 8 bytes!
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotCallThis();
  return 0; // unused
}

// 4-byte accesses only! Don't use it to access 8 bytes!
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  ShouldNotCallThis();
  return 0; // unused
}

void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code,
                            CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = Z_R0;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Immediate::is_simm20(disp_value)) {
      if (needs_patching) {
        __ load_const(Z_R1_scratch, (intptr_t)0);
      } else {
        __ load_const_optimized(Z_R1_scratch, disp_value);
      }
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
  } else {
    if (!Immediate::is_simm20(disp_value)) {
      // Fold base displacement and index into Z_R1_scratch. Must not
      // overwrite it with the index register afterwards.
      __ load_const_optimized(Z_R1_scratch, disp_value);
      __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    } else {
      disp_reg = addr->index()->as_pointer_register();
    }
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
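  // (The switch below picks the narrowest load for the type; where two forms
  // exist, short_disp selects the base instruction with its 12-bit unsigned
  // displacement over the long-displacement 'y' form with a 20-bit signed one.)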
  int offset = code_offset();

  assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");

  bool short_disp = Immediate::is_uimm12(disp_value);

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ z_lb(dest->as_register(),   disp_value, disp_reg, src); break;
    case T_CHAR  : __ z_llgh(dest->as_register(), disp_value, disp_reg, src); break;
    case T_SHORT :
      if (short_disp) {
        __ z_lh(dest->as_register(),  disp_value, disp_reg, src);
      } else {
        __ z_lhy(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_INT   :
      if (short_disp) {
        __ z_l(dest->as_register(),  disp_value, disp_reg, src);
      } else {
        __ z_ly(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
        __ decode_klass_not_null(dest->as_register());
      } else {
        __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
    {
      if (UseCompressedOops && !wide) {
        __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
        __ oop_decoder(dest->as_register(), dest->as_register(), true);
      } else {
        __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      }
      __ verify_oop(dest->as_register());
      break;
    }
    case T_FLOAT:
      if (short_disp) {
        __ z_le(dest->as_float_reg(),  disp_value, disp_reg, src);
      } else {
        __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_DOUBLE:
      if (short_disp) {
        __ z_ld(dest->as_double_reg(),  disp_value, disp_reg, src);
      } else {
        __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_LONG  : __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
    default      : ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}

void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (is_reference_type(type)) {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA) {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
    } else {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false);
    }
  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix());
    __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true);
  } else if (dest->is_single_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ mem2freg_opt(dest->as_float_reg(), src_addr, false);
  } else if (dest->is_double_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ mem2freg_opt(dest->as_double_reg(), src_addr, true);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    const Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (is_reference_type(type)) {
      __ verify_oop(src->as_register());
      __ reg2mem_opt(src->as_register(), dst, true);
    } else if (type == T_METADATA) {
      __ reg2mem_opt(src->as_register(), dst, true);
    } else {
      __ reg2mem_opt(src->as_register(), dst, false);
    }
  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix());
    __ reg2mem_opt(src->as_register_lo(), dstLO, true);
  } else if (src->is_single_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ freg2mem_opt(src->as_float_reg(), dst_addr, false);
  } else if (src->is_double_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ freg2mem_opt(src->as_double_reg(), dst_addr, true);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ z_lgr(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ z_lgr(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (is_reference_type(to_reg->type())) {
    __ verify_oop(to_reg->as_register());
  }
}

void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest_opr->as_address_ptr();

  Register dest = addr->base()->as_pointer_register();
  Register disp_reg = Z_R0;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(dest);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  assert(!needs_patching || (!Immediate::is_simm20(disp_value) && addr->index()->is_illegal()), "assumption");
  if (addr->index()->is_illegal()) {
    if (!Immediate::is_simm20(disp_value)) {
      if (needs_patching) {
        __ load_const(Z_R1_scratch, (intptr_t)0);
      } else {
        __ load_const_optimized(Z_R1_scratch, disp_value);
      }
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
  } else {
    if (!Immediate::is_simm20(disp_value)) {
      // Fold base displacement and index into Z_R1_scratch, as in mem2reg().
      __ load_const_optimized(Z_R1_scratch, disp_value);
      __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    } else {
      disp_reg = addr->index()->as_pointer_register();
    }
  }

  assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");

  if (is_reference_type(type)) {
    __ verify_oop(from->as_register());
  }

  bool short_disp = Immediate::is_uimm12(disp_value);

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  :
      if (short_disp) {
        __ z_stc(from->as_register(),  disp_value, disp_reg, dest);
      } else {
        __ z_stcy(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_CHAR  : // fall through
    case T_SHORT :
      if (short_disp) {
        __ z_sth(from->as_register(),  disp_value, disp_reg, dest);
      } else {
        __ z_sthy(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_INT   :
      if (short_disp) {
        __ z_st(from->as_register(),  disp_value, disp_reg, dest);
      } else {
        __ z_sty(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_LONG  :  __ z_stg(from->as_register_lo(), disp_value, disp_reg, dest); break;
    case T_ADDRESS: __ z_stg(from->as_register(),    disp_value, disp_reg, dest); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          Register compressed_src = Z_R14;
          __ oop_encoder(compressed_src, from->as_register(), true, (disp_reg != Z_R1) ? Z_R1 : Z_R0, -1, true);
          offset = code_offset();
          if (short_disp) {
            __ z_st(compressed_src,  disp_value, disp_reg, dest);
          } else {
            __ z_sty(compressed_src, disp_value, disp_reg, dest);
          }
        } else {
          __ z_stg(from->as_register(), disp_value, disp_reg, dest);
        }
        break;
      }
    case T_FLOAT :
      if (short_disp) {
        __ z_ste(from->as_float_reg(),  disp_value, disp_reg, dest);
      } else {
        __ z_stey(from->as_float_reg(), disp_value, disp_reg, dest);
      }
      break;
    case T_DOUBLE:
      if (short_disp) {
        __ z_std(from->as_double_reg(),  disp_value, disp_reg, dest);
      } else {
        __ z_stdy(from->as_double_reg(), disp_value, disp_reg, dest);
      }
      break;
    default: ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, dest, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}

void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() ||
         (result->is_single_cpu() && result->as_register() == Z_R2) ||
         (result->is_double_cpu() && result->as_register_lo() == Z_R2) ||
         (result->is_single_fpu() && result->as_float_reg() == Z_F0) ||
         (result->is_double_fpu() && result->as_double_reg() == Z_F0), "convention");

  if (SafepointMechanism::uses_thread_local_poll()) {
    __ z_lg(Z_R1_scratch, Address(Z_thread, Thread::polling_page_offset()));
  } else {
    AddressLiteral pp(os::get_polling_page());
    __ load_const_optimized(Z_R1_scratch, pp);
  }

  // Pop the frame before the safepoint code.
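  // (Popping first presumably ensures that a safepoint taken at the return
  // poll below already sees the caller's frame state; the poll_return_type
  // relocation marks the poll for the safepoint code.)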
  __ pop_frame_restore_retPC(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(Z_R14);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(Z_R1_scratch);

  __ z_br(Z_R14); // Return to caller.
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register_lo();
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ z_lg(poll_addr, Address(Z_thread, Thread::polling_page_offset()));
  } else {
    AddressLiteral pp(os::get_polling_page());
    __ load_const_optimized(poll_addr, pp);
  }
  guarantee(info != NULL, "Shouldn't be NULL");
  add_debug_info_for_branch(info);
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);
  return offset;
}

void LIR_Assembler::emit_static_call_stub() {

  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.

  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));

  // See also Matcher::interpreter_method_oop_reg().
  AddressLiteral meta = __ allocate_metadata_address(NULL);
  bool success = __ load_const_from_toc(Z_method, meta);

  __ set_inst_mark();
  AddressLiteral a((address)-1);
  success = success && __ load_const_from_toc(Z_R1, a);
  if (!success) {
    bailout("const section overflow");
    return;
  }

  __ z_br(Z_R1);
  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub(); // Update current stubs pointer and restore insts_end.
}

void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual;
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (is_reference_type(opr1->type())) {
        __ z_clgr(reg1, opr2->as_register());
      } else {
        assert(!is_reference_type(opr2->type()), "cmp int, oop?");
        if (unsigned_comp) {
          __ z_clr(reg1, opr2->as_register());
        } else {
          __ z_cr(reg1, opr2->as_register());
        }
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (is_reference_type(opr1->type())) {
        __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        if (unsigned_comp) {
          __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        } else {
          __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        }
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        if (unsigned_comp) {
          __ z_clfi(reg1, c->as_jint());
        } else {
          __ z_cfi(reg1, c->as_jint());
        }
      } else if (is_reference_type(c->type())) {
        // On 64 bit, an oop fits into a single register.
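        // (A NULL constant is handled below with a load-and-test of reg1
        // against itself, which sets the condition code without having to
        // materialize a NULL value.)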
        jobject o = c->as_jobject();
        if (o == NULL) {
          __ z_ltgr(reg1, reg1);
        } else {
          jobject2reg(o, Z_R1_scratch);
          __ z_cgr(reg1, Z_R1_scratch);
        }
      } else {
        fatal("unexpected type: %s", basictype_to_str(c->type()));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      if (unsigned_comp) {
        __ z_cly(reg1, as_Address(opr2->as_address_ptr()));
      } else {
        __ z_cy(reg1, as_Address(opr2->as_address_ptr()));
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_cpu()) {
    assert(!unsigned_comp, "unexpected");
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      __ z_cgr(xlo, opr2->as_register_lo());
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
      __ z_ltgr(xlo, xlo);
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_fpu()) {
    if (opr2->is_single_fpu()) {
      __ z_cebr(opr1->as_float_reg(), opr2->as_float_reg());
    } else {
      // stack slot
      Address addr = frame_map()->address_for_slot(opr2->single_stack_ix());
      if (Immediate::is_uimm12(addr.disp())) {
        __ z_ceb(opr1->as_float_reg(), addr);
      } else {
        __ z_ley(Z_fscratch_1, addr);
        __ z_cebr(opr1->as_float_reg(), Z_fscratch_1);
      }
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_double_fpu()) {
      __ z_cdbr(opr1->as_double_reg(), opr2->as_double_reg());
    } else {
      // stack slot
      Address addr = frame_map()->address_for_slot(opr2->double_stack_ix());
      if (Immediate::is_uimm12(addr.disp())) {
        __ z_cdb(opr1->as_double_reg(), addr);
      } else {
        __ z_ldy(Z_fscratch_1, addr);
        __ z_cdbr(opr1->as_double_reg(), Z_fscratch_1);
      }
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  Label done;
  Register dreg = dst->as_register();

  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    assert((left->is_single_fpu() && right->is_single_fpu()) ||
           (left->is_double_fpu() && right->is_double_fpu()), "unexpected operand types");
    bool is_single = left->is_single_fpu();
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    FloatRegister lreg = is_single ? left->as_float_reg() : left->as_double_reg();
    FloatRegister rreg = is_single ? right->as_float_reg() : right->as_double_reg();
    if (is_single) {
      __ z_cebr(lreg, rreg);
    } else {
      __ z_cdbr(lreg, rreg);
    }
    if (VM_Version::has_LoadStoreConditional()) {
      Register one       = Z_R0_scratch;
      Register minus_one = Z_R1_scratch;
      __ z_lghi(minus_one, -1);
      __ z_lghi(one, 1);
      __ z_lghi(dreg, 0);
      __ z_locgr(dreg, one,       is_unordered_less ? Assembler::bcondHigh            : Assembler::bcondHighOrNotOrdered);
      __ z_locgr(dreg, minus_one, is_unordered_less ? Assembler::bcondLowOrNotOrdered : Assembler::bcondLow);
    } else {
      __ clear_reg(dreg, true, false);
      __ z_bre(done); // if (left == right) dst = 0

      // dst = 1 if (left > right), or if unordered and code == cmpg.
      __ z_lhi(dreg, 1);
      __ z_brc(is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered, done);

      // dst = -1 if (left < right), or if unordered and code == cmpl.
      __ z_lhi(dreg, -1);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    if (VM_Version::has_LoadStoreConditional()) {
      Register one       = Z_R0_scratch;
      Register minus_one = Z_R1_scratch;
      __ z_cgr(left->as_register_lo(), right->as_register_lo());
      __ z_lghi(minus_one, -1);
      __ z_lghi(one, 1);
      __ z_lghi(dreg, 0);
      __ z_locgr(dreg, one, Assembler::bcondHigh);
      __ z_locgr(dreg, minus_one, Assembler::bcondLow);
    } else {
      __ z_cgr(left->as_register_lo(), right->as_register_lo());
      __ z_lghi(dreg, 0);  // eq value
      __ z_bre(done);
      __ z_lghi(dreg, 1);  // gt value
      __ z_brh(done);
      __ z_lghi(dreg, -1); // lt value
    }
  }
  __ bind(done);
}

// result = condition ? opr1 : opr2
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::branch_condition acond = Assembler::bcondEqual, ncond = Assembler::bcondNotEqual;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::bcondEqual;    ncond = Assembler::bcondNotEqual; break;
    case lir_cond_notEqual:     acond = Assembler::bcondNotEqual; ncond = Assembler::bcondEqual;    break;
    case lir_cond_less:         acond = Assembler::bcondLow;      ncond = Assembler::bcondNotLow;   break;
    case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
    case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
    case lir_cond_greater:      acond = Assembler::bcondHigh;     ncond = Assembler::bcondNotHigh;  break;
    case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
    case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
    default:                    ShouldNotReachHere();
  }

  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::has_LoadStoreConditional() && !opr2->is_constant()) {
    // Optimized version that does not require a branch.
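    // (z_locgr/z_loc/z_locg are the load-on-condition instructions guarded by
    // VM_Version::has_LoadStoreConditional(): result already holds opr1 here,
    // so it is conditionally overwritten with opr2 under the negated
    // condition ncond, with no branch needed.)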
1481 if (opr2->is_single_cpu()) { 1482 assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move"); 1483 __ z_locgr(result->as_register(), opr2->as_register(), ncond); 1484 } else if (opr2->is_double_cpu()) { 1485 assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1486 assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move"); 1487 __ z_locgr(result->as_register_lo(), opr2->as_register_lo(), ncond); 1488 } else if (opr2->is_single_stack()) { 1489 __ z_loc(result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()), ncond); 1490 } else if (opr2->is_double_stack()) { 1491 __ z_locg(result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix()), ncond); 1492 } else { 1493 ShouldNotReachHere(); 1494 } 1495 } else { 1496 Label skip; 1497 __ z_brc(acond, skip); 1498 if (opr2->is_cpu_register()) { 1499 reg2reg(opr2, result); 1500 } else if (opr2->is_stack()) { 1501 stack2reg(opr2, result, result->type()); 1502 } else if (opr2->is_constant()) { 1503 const2reg(opr2, result, lir_patch_none, NULL); 1504 } else { 1505 ShouldNotReachHere(); 1506 } 1507 __ bind(skip); 1508 } 1509 } 1510 1511 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, 1512 CodeEmitInfo* info, bool pop_fpu_stack) { 1513 assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method"); 1514 1515 if (left->is_single_cpu()) { 1516 assert(left == dest, "left and dest must be equal"); 1517 Register lreg = left->as_register(); 1518 1519 if (right->is_single_cpu()) { 1520 // cpu register - cpu register 1521 Register rreg = right->as_register(); 1522 switch (code) { 1523 case lir_add: __ z_ar (lreg, rreg); break; 1524 case lir_sub: __ z_sr (lreg, rreg); break; 1525 case lir_mul: __ z_msr(lreg, rreg); break; 1526 default: ShouldNotReachHere(); 1527 } 1528 1529 } else if (right->is_stack()) { 1530 // cpu register - stack 1531 Address raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1532 switch (code) { 1533 case lir_add: __ z_ay(lreg, raddr); break; 1534 case lir_sub: __ z_sy(lreg, raddr); break; 1535 default: ShouldNotReachHere(); 1536 } 1537 1538 } else if (right->is_constant()) { 1539 // cpu register - constant 1540 jint c = right->as_constant_ptr()->as_jint(); 1541 switch (code) { 1542 case lir_add: __ z_agfi(lreg, c); break; 1543 case lir_sub: __ z_agfi(lreg, -c); break; // note: -min_jint == min_jint 1544 case lir_mul: __ z_msfi(lreg, c); break; 1545 default: ShouldNotReachHere(); 1546 } 1547 1548 } else { 1549 ShouldNotReachHere(); 1550 } 1551 1552 } else if (left->is_double_cpu()) { 1553 assert(left == dest, "left and dest must be equal"); 1554 Register lreg_lo = left->as_register_lo(); 1555 Register lreg_hi = left->as_register_hi(); 1556 1557 if (right->is_double_cpu()) { 1558 // cpu register - cpu register 1559 Register rreg_lo = right->as_register_lo(); 1560 Register rreg_hi = right->as_register_hi(); 1561 assert_different_registers(lreg_lo, rreg_lo); 1562 switch (code) { 1563 case lir_add: 1564 __ z_agr(lreg_lo, rreg_lo); 1565 break; 1566 case lir_sub: 1567 __ z_sgr(lreg_lo, rreg_lo); 1568 break; 1569 case lir_mul: 1570 __ z_msgr(lreg_lo, rreg_lo); 1571 break; 1572 default: 1573 ShouldNotReachHere(); 1574 } 1575 1576 } else if (right->is_constant()) { 1577 // cpu register - constant 1578 jlong c = 
right->as_constant_ptr()->as_jlong_bits(); 1579 switch (code) { 1580 case lir_add: __ z_agfi(lreg_lo, c); break; 1581 case lir_sub: 1582 if (c != min_jint) { 1583 __ z_agfi(lreg_lo, -c); 1584 } else { 1585 // -min_jint cannot be represented as simm32 in z_agfi 1586 // min_jint sign extended: 0xffffffff80000000 1587 // -min_jint as 64 bit integer: 0x0000000080000000 1588 // 0x80000000 can be represented as uimm32 in z_algfi 1589 // lreg_lo := lreg_lo + -min_jint == lreg_lo + 0x80000000 1590 __ z_algfi(lreg_lo, UCONST64(0x80000000)); 1591 } 1592 break; 1593 case lir_mul: __ z_msgfi(lreg_lo, c); break; 1594 default: 1595 ShouldNotReachHere(); 1596 } 1597 1598 } else { 1599 ShouldNotReachHere(); 1600 } 1601 1602 } else if (left->is_single_fpu()) { 1603 assert(left == dest, "left and dest must be equal"); 1604 FloatRegister lreg = left->as_float_reg(); 1605 FloatRegister rreg = right->is_single_fpu() ? right->as_float_reg() : fnoreg; 1606 Address raddr; 1607 1608 if (rreg == fnoreg) { 1609 assert(right->is_single_stack(), "constants should be loaded into register"); 1610 raddr = frame_map()->address_for_slot(right->single_stack_ix()); 1611 if (!Immediate::is_uimm12(raddr.disp())) { 1612 __ mem2freg_opt(rreg = Z_fscratch_1, raddr, false); 1613 } 1614 } 1615 1616 if (rreg != fnoreg) { 1617 switch (code) { 1618 case lir_add: __ z_aebr(lreg, rreg); break; 1619 case lir_sub: __ z_sebr(lreg, rreg); break; 1620 case lir_mul_strictfp: // fall through 1621 case lir_mul: __ z_meebr(lreg, rreg); break; 1622 case lir_div_strictfp: // fall through 1623 case lir_div: __ z_debr(lreg, rreg); break; 1624 default: ShouldNotReachHere(); 1625 } 1626 } else { 1627 switch (code) { 1628 case lir_add: __ z_aeb(lreg, raddr); break; 1629 case lir_sub: __ z_seb(lreg, raddr); break; 1630 case lir_mul_strictfp: // fall through 1631 case lir_mul: __ z_meeb(lreg, raddr); break; 1632 case lir_div_strictfp: // fall through 1633 case lir_div: __ z_deb(lreg, raddr); break; 1634 default: ShouldNotReachHere(); 1635 } 1636 } 1637 } else if (left->is_double_fpu()) { 1638 assert(left == dest, "left and dest must be equal"); 1639 FloatRegister lreg = left->as_double_reg(); 1640 FloatRegister rreg = right->is_double_fpu() ? 
void LIR_Assembler::fpop() {
  // do nothing
}

void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_sqrt: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ z_sqdbr(dst_reg, src_reg);
      break;
    }
    case lir_abs: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ z_lpdbr(dst_reg, src_reg);
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}
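
// For 32-bit operands the immediate forms NILF/OILF/XILF are used; they
// operate on the low word of the 64-bit register only, which is all that
// matters for T_INT values. The 64-bit path materializes the constant in
// a scratch register instead, since no single z instruction takes a full
// 64-bit logical immediate.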
void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  if (left->is_single_cpu()) {
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ z_nilf(reg, val); break;
        case lir_logic_or:  __ z_oilf(reg, val); break;
        case lir_logic_xor: __ z_xilf(reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ z_ny(reg, raddr); break;
        case lir_logic_or:  __ z_oy(reg, raddr); break;
        case lir_logic_xor: __ z_xy(reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ z_nr(reg, rright); break;
        case lir_logic_or : __ z_or(reg, rright); break;
        case lir_logic_xor: __ z_xr(reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    Register l_lo = left->as_register_lo();
    if (right->is_constant()) {
      __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ z_ngr(l_lo, Z_R1_scratch);
          break;
        case lir_logic_or:
          __ z_ogr(l_lo, Z_R1_scratch);
          break;
        case lir_logic_xor:
          __ z_xgr(l_lo, Z_R1_scratch);
          break;
        default: ShouldNotReachHere();
      }
    } else {
      Register r_lo;
      if (is_reference_type(right->type())) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
      switch (code) {
        case lir_logic_and:
          __ z_ngr(l_lo, r_lo);
          break;
        case lir_logic_or:
          __ z_ogr(l_lo, r_lo);
          break;
        case lir_logic_xor:
          __ z_xgr(l_lo, r_lo);
          break;
        default: ShouldNotReachHere();
      }
    }

    Register dst_lo = dst->as_register_lo();

    move_regs(l_lo, dst_lo);
  }
}

// See operand selection in LIRGenerator::do_ArithmeticOp_Int().
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  if (left->is_double_cpu()) {
    // 64 bit integer case
    assert(left->is_double_cpu(), "left must be register");
    assert(right->is_double_cpu() || is_power_of_2_long(right->as_jlong()),
           "right must be register or power of 2 constant");
    assert(result->is_double_cpu(), "result must be register");

    Register lreg = left->as_register_lo();
    Register dreg = result->as_register_lo();

    if (right->is_constant()) {
      // Convert division by a power of two into some shifts and logical operations.
      Register treg1 = Z_R0_scratch;
      Register treg2 = Z_R1_scratch;
      jlong divisor = right->as_jlong();
      jlong log_divisor = log2_long(right->as_jlong());

      if (divisor == min_jlong) {
        // Min_jlong is special. Result is '0' except for min_jlong/min_jlong = 1.
        if (dreg == lreg) {
          NearLabel done;
          __ load_const_optimized(treg2, min_jlong);
          __ z_cgr(lreg, treg2);
          __ z_lghi(dreg, 0); // Preserves condition code.
          __ z_brne(done);
          __ z_lghi(dreg, 1); // min_jlong / min_jlong = 1
          __ bind(done);
        } else {
          assert_different_registers(dreg, lreg);
          NearLabel done;
          __ z_lghi(dreg, 0);
          __ compare64_and_branch(lreg, min_jlong, Assembler::bcondNotEqual, done);
          __ z_lghi(dreg, 1);
          __ bind(done);
        }
        return;
      }
      __ move_reg_if_needed(dreg, T_LONG, lreg, T_LONG);
      if (divisor == 2) {
        __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0
      } else {
        __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0
        __ and_imm(treg2, divisor - 1, treg1, true);
      }
      if (code == lir_idiv) {
        __ z_agr(dreg, treg2);
        __ z_srag(dreg, dreg, log_divisor);
      } else {
        assert(code == lir_irem, "check");
        __ z_agr(treg2, dreg);
        __ and_imm(treg2, ~(divisor - 1), treg1, true);
        __ z_sgr(dreg, treg2);
      }
      return;
    }
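
    // As an illustration of the power-of-2 path above: for divisor 8
    // (log_divisor 3) and dividend -20, SRAG by 63 gives -1, ANDing with
    // (8-1) yields the rounding correction 7, and (-20 + 7) >> 3 == -2,
    // which is -20/8 truncated toward zero as Java requires. For a
    // non-negative dividend the correction is 0 and the shift is exact.
    // The lir_irem variant computes x - ((x + correction) & ~(divisor-1)),
    // here -20 - (-16) == -4.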

    // Divisor is not a power of 2 constant.
    Register rreg = right->as_register_lo();
    Register treg = temp->as_register_lo();
    assert(right->is_double_cpu(), "right must be register");
    assert(lreg == Z_R11, "see ldivInOpr()");
    assert(rreg != lreg, "right register must not be same as left register");
    assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) ||
           (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see ldivInOpr(), ldivOutOpr(), lremOutOpr()");

    Register R1 = lreg->predecessor();
    Register R2 = rreg;
    assert(code != lir_idiv || lreg == dreg, "see code below");
    if (code == lir_idiv) {
      __ z_lcgr(lreg, lreg);
    } else {
      __ clear_reg(dreg, true, false);
    }
    NearLabel done;
    __ compare64_and_branch(R2, -1, Assembler::bcondEqual, done);
    if (code == lir_idiv) {
      __ z_lcgr(lreg, lreg); // Revert lcgr above.
    }
    if (ImplicitDiv0Checks) {
      // No debug info because the idiv won't trap.
      // Add_debug_info_for_div0 would instantiate another DivByZeroStub,
      // which is unnecessary, too.
      add_debug_info_for_div0(__ offset(), info);
    }
    __ z_dsgr(R1, R2);
    __ bind(done);
    return;
  }

  // 32 bit integer case

  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || is_power_of_2(right->as_jint()), "right must be register or power of 2 constant");
  assert(result->is_single_cpu(), "result must be register");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    // Convert division by a power of two into some shifts and logical operations.
    Register treg1 = Z_R0_scratch;
    Register treg2 = Z_R1_scratch;
    jlong divisor = right->as_jint();
    jlong log_divisor = log2_long(right->as_jint());
    __ move_reg_if_needed(dreg, T_LONG, lreg, T_INT); // sign extend
    if (divisor == 2) {
      __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0
    } else {
      __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0
      __ and_imm(treg2, divisor - 1, treg1, true);
    }
    if (code == lir_idiv) {
      __ z_agr(dreg, treg2);
      __ z_srag(dreg, dreg, log_divisor);
    } else {
      assert(code == lir_irem, "check");
      __ z_agr(treg2, dreg);
      __ and_imm(treg2, ~(divisor - 1), treg1, true);
      __ z_sgr(dreg, treg2);
    }
    return;
  }

  // Divisor is not a power of 2 constant.
  Register rreg = right->as_register();
  Register treg = temp->as_register();
  assert(right->is_single_cpu(), "right must be register");
  assert(lreg == Z_R11, "see divInOpr()");
  assert(rreg != lreg, "right register must not be same as left register");
  assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10)
         || (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see divInOpr(), divOutOpr(), remOutOpr()");

  Register R1 = lreg->predecessor();
  Register R2 = rreg;
  __ move_reg_if_needed(lreg, T_LONG, lreg, T_INT); // sign extend
  if (ImplicitDiv0Checks) {
    // No debug info because the idiv won't trap.
    // Add_debug_info_for_div0 would instantiate another DivByZeroStub,
    // which is unnecessary, too.
    add_debug_info_for_div0(__ offset(), info);
  }
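
  // DSGFR below (like DSGR in the 64-bit path above) works on the even/odd
  // register pair R1:R1+1: the dividend is taken from the odd register, the
  // remainder is left in the even register and the quotient in the odd one.
  // That is why dreg/treg are pinned to Z_R10/Z_R11 by the asserts above.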
  __ z_dsgfr(R1, R2);
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");
  assert(exceptionPC->as_register() == Z_EXC_PC, "should match");

  // Exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers).
  info->add_register_oop(exceptionOop);

  // Reuse the debug info from the safepoint poll for the throw op itself.
  __ get_PC(Z_EXC_PC);
  add_call_info(__ offset(), info); // for exception handler
  address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? Runtime1::handle_exception_id
                                                                    : Runtime1::handle_exception_nofpu_id);
  emit_call_c(stub);
}

void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");

  __ branch_optimized(Assembler::bcondAlways, _unwind_handler_entry);
}
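
// Arraycopy code generation. If nothing is known about the involved types,
// the generic stub is used (or everything is routed to the slow-path stub
// when no generic stub exists). Otherwise explicit checks (null, index
// signs, bounds, element types) guard a call to a type-specialized copy
// stub; every failed check branches to the slow-path stub, which ends up
// calling into the runtime.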
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // If we don't know anything, just go through the generic arraycopy.
  if (default_type == NULL) {
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    if (copyfunc_addr == NULL) {
      // Take a slow path for generic arraycopy.
      __ branch_optimized(Assembler::bcondAlways, *stub->entry());
      __ bind(*stub->continuation());
      return;
    }

    // Save outgoing arguments in callee saved registers (C convention) in case
    // a call to System.arraycopy is needed.
    Register callee_saved_src = Z_R10;
    Register callee_saved_src_pos = Z_R11;
    Register callee_saved_dst = Z_R12;
    Register callee_saved_dst_pos = Z_R13;
    Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.

    __ lgr_if_needed(callee_saved_src, src);
    __ lgr_if_needed(callee_saved_src_pos, src_pos);
    __ lgr_if_needed(callee_saved_dst, dst);
    __ lgr_if_needed(callee_saved_dst_pos, dst_pos);
    __ lgr_if_needed(callee_saved_length, length);

    // C function requires 64 bit values.
    __ z_lgfr(src_pos, src_pos);
    __ z_lgfr(dst_pos, dst_pos);
    __ z_lgfr(length, length);

    // Pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint.

    // The arguments are in the corresponding registers.
    assert(Z_ARG1 == src,     "assumption");
    assert(Z_ARG2 == src_pos, "assumption");
    assert(Z_ARG3 == dst,     "assumption");
    assert(Z_ARG4 == dst_pos, "assumption");
    assert(Z_ARG5 == length,  "assumption");
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt);
      __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
    }
#endif
    emit_call_c(copyfunc_addr);
    CHECK_BAILOUT();

    __ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation());

    __ z_lgr(tmp, Z_RET);
    __ z_xilf(tmp, -1);
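
    // By convention (see StubRoutines::generic_arraycopy) the stub returns
    // 0 when the whole copy succeeded, or the bitwise complement of the
    // number of elements already copied when it had to bail out. XILF with
    // -1 is a bitwise NOT of the low word and recovers that count, so the
    // positions and length can be advanced past the part that is already
    // done before retrying via the slow-path stub.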
    // Restore values from callee saved registers so they are where the stub
    // expects them.
    __ lgr_if_needed(src, callee_saved_src);
    __ lgr_if_needed(src_pos, callee_saved_src_pos);
    __ lgr_if_needed(dst, callee_saved_dst);
    __ lgr_if_needed(dst_pos, callee_saved_dst_pos);
    __ lgr_if_needed(length, callee_saved_length);

    __ z_sr(length, tmp);
    __ z_ar(src_pos, tmp);
    __ z_ar(dst_pos, tmp);
    __ branch_optimized(Assembler::bcondAlways, *stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;

  switch (elem_size) {
    case 1 :
      shift_amount = 0;
      break;
    case 2 :
      shift_amount = 1;
      break;
    case 4 :
      shift_amount = 2;
      break;
    case 8 :
      shift_amount = 3;
      break;
    default:
      shift_amount = -1;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // Length and pos values are all sign extended at this point on 64 bit.

  // Test for NULL.
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondZero, *stub->entry());
  }

  // Check if negative.
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ compare32_and_branch(src_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ compare32_and_branch(dst_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry());
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
  if (flags & LIR_OpArrayCopy::type_check) {
    assert(Klass::_lh_neutral_value == 0, "or replace z_lt instructions");

    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ branch_optimized(Assembler::bcondNotLow, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ branch_optimized(Assembler::bcondNotLow, *stub->entry());
    }
  }
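
  // The two checks above rely on Klass::layout_helper being negative for
  // array klasses and non-negative for instance klasses (_lh_neutral_value
  // separates the encodings): Z_LT loads and tests the value, and
  // "not low" (>= 0) means we are looking at an instance klass, so the
  // slow path must handle the copy.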
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ z_la(tmp, Address(src_pos, length));
    __ z_cl(tmp, src_length_addr);
    __ branch_optimized(Assembler::bcondHigh, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ z_la(tmp, Address(dst_pos, length));
    __ z_cl(tmp, dst_length_addr);
    __ branch_optimized(Assembler::bcondHigh, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ z_ltr(length, length);
    __ branch_optimized(Assembler::bcondNegative, *stub->entry());
  }

  // Stubs require 64 bit values.
  __ z_lgfr(src_pos, src_pos); // int -> long
  __ z_lgfr(dst_pos, dst_pos); // int -> long
  __ z_lgfr(length, length);   // int -> long

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible.
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays.
      if (UseCompressedClassPointers) {
        __ z_l(tmp, src_klass_addr);
        __ z_c(tmp, dst_klass_addr);
      } else {
        __ z_lg(tmp, src_klass_addr);
        __ z_cg(tmp, dst_klass_addr);
      }
      __ branch_optimized(Assembler::bcondNotEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      NearLabel cont, slow;
      Register src_klass = Z_R1_scratch;
      Register dst_klass = Z_R10;

      __ load_klass(src_klass, src);
      __ load_klass(dst_klass, dst);

      __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, NULL);

      store_parameter(src_klass, 0); // sub
      store_parameter(dst_klass, 1); // super
      emit_call_c(Runtime1::entry_for (Runtime1::slow_subtype_check_id));
      CHECK_BAILOUT2(cont, slow);
      // Sets condition code 0 for match (2 otherwise).
      __ branch_optimized(Assembler::bcondEqual, cont);

      __ bind(slow);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // Src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // One of the two is known to be an object array; check that the
          // other one is an object array, too.
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          Address klass_lh_addr(tmp, Klass::layout_helper_offset());
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ load_const_optimized(Z_R1_scratch, objArray_lh);
          __ z_c(Z_R1_scratch, klass_lh_addr);
          __ branch_optimized(Assembler::bcondNotEqual, *stub->entry());
        }

        // Save outgoing arguments in callee saved registers (C convention) in case
        // a call to System.arraycopy is needed.
        Register callee_saved_src = Z_R10;
        Register callee_saved_src_pos = Z_R11;
        Register callee_saved_dst = Z_R12;
        Register callee_saved_dst_pos = Z_R13;
        Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.

        __ lgr_if_needed(callee_saved_src, src);
        __ lgr_if_needed(callee_saved_src_pos, src_pos);
        __ lgr_if_needed(callee_saved_dst, dst);
        __ lgr_if_needed(callee_saved_dst_pos, dst_pos);
        __ lgr_if_needed(callee_saved_length, length);

        __ z_llgfr(length, length); // Upper 32 bits must be zero.

        __ z_sllg(Z_ARG1, src_pos, shift_amount); // index -> byte offset
        __ z_sllg(Z_ARG2, dst_pos, shift_amount); // index -> byte offset

        __ z_la(Z_ARG1, Address(src, Z_ARG1, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(Z_ARG1, dst, dst_pos, length);
        __ z_la(Z_ARG2, Address(dst, Z_ARG2, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(Z_ARG2, dst, length);

        __ z_lgr(Z_ARG3, length);
        assert_different_registers(Z_ARG3, dst);

        __ load_klass(Z_ARG5, dst);
        __ z_lg(Z_ARG5, Address(Z_ARG5, ObjArrayKlass::element_klass_offset()));
        __ z_lg(Z_ARG4, Address(Z_ARG5, Klass::super_check_offset_offset()));
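
        // Checkcast stub calling convention (as set up above): Z_ARG1 and
        // Z_ARG2 hold raw source/destination element addresses, Z_ARG3 the
        // element count, Z_ARG4 the super_check_offset and Z_ARG5 the
        // destination element klass against which every copied element is
        // type checked.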
        emit_call_c(copyfunc_addr);
        CHECK_BAILOUT2(cont, slow);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          NearLabel failed;
          __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondNotEqual, failed);
          __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_cnt);
          __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
          __ bind(failed);
        }
#endif

        __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_attempt_cnt);
          __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
        }
#endif

        __ z_lgr(tmp, Z_RET);
        __ z_xilf(tmp, -1);

        // Restore previously spilled arguments
        __ lgr_if_needed(src, callee_saved_src);
        __ lgr_if_needed(src_pos, callee_saved_src_pos);
        __ lgr_if_needed(dst, callee_saved_dst);
        __ lgr_if_needed(dst_pos, callee_saved_dst_pos);
        __ lgr_if_needed(length, callee_saved_length);

        __ z_sr(length, tmp);
        __ z_ar(src_pos, tmp);
        __ z_ar(dst_pos, tmp);
      }

      __ branch_optimized(Assembler::bcondAlways, *stub->entry());

      __ bind(cont);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    NearLabel known_ok, halt;
    metadata2reg(default_type->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }

    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); }
      else                            { __ z_cg(tmp, dst_klass_addr); }
      __ branch_optimized(Assembler::bcondNotEqual, halt);
      if (UseCompressedClassPointers) { __ z_c (tmp, src_klass_addr); }
      else                            { __ z_cg(tmp, src_klass_addr); }
      __ branch_optimized(Assembler::bcondEqual, known_ok);
    } else {
      if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); }
      else                            { __ z_cg(tmp, dst_klass_addr); }
      __ branch_optimized(Assembler::bcondEqual, known_ok);
      __ compareU64_and_branch(src, dst, Assembler::bcondEqual, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ load_const_optimized(Z_R1_scratch, Runtime1::arraycopy_count_address(basic_type));
    __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
  }
#endif

  __ z_sllg(tmp, src_pos, shift_amount);          // index -> byte offset
  __ z_sllg(Z_R1_scratch, dst_pos, shift_amount); // index -> byte offset

  assert_different_registers(Z_ARG1, dst, dst_pos, length);
  __ z_la(Z_ARG1, Address(src, tmp, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(Z_ARG2, length);
  __ z_la(Z_ARG2, Address(dst, Z_R1_scratch, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ lgr_if_needed(Z_ARG3, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry);

  __ bind(*stub->continuation());
}
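
// Shift operations. For int shifts the count is masked to the low 5 bits
// (NILL with 31 for the register form, "& 0x1F" for the constant form) as
// the Java language requires; the 64-bit shift instructions implicitly use
// only the low 6 bits of the count, and constant long counts are masked
// with 63.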
void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ z_sllg (dest->as_register(), left->as_register(), 0, count->as_register()); break;
        case lir_shr:  __ z_srag (dest->as_register(), left->as_register(), 0, count->as_register()); break;
        case lir_ushr: __ z_srlg (dest->as_register(), left->as_register(), 0, count->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts");
      Register masked_count = Z_R1_scratch;
      __ z_lr(masked_count, count->as_register());
      __ z_nill(masked_count, 31);
      switch (code) {
        case lir_shl:  __ z_sllg (dest->as_register(), left->as_register(), 0, masked_count); break;
        case lir_shr:  __ z_sra  (dest->as_register(), 0, masked_count); break;
        case lir_ushr: __ z_srl  (dest->as_register(), 0, masked_count); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    switch (code) {
      case lir_shl:  __ z_sllg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
      case lir_shr:  __ z_srag (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
      case lir_ushr: __ z_srlg (dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
      default: ShouldNotReachHere();
    }
  }
}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (left->type() == T_OBJECT) {
    count = count & 63; // Shouldn't shift by more than sizeof(intptr_t).
    Register l = left->as_register();
    Register d = dest->as_register_lo();
    switch (code) {
      case lir_shl:  __ z_sllg (d, l, count); break;
      case lir_shr:  __ z_srag (d, l, count); break;
      case lir_ushr: __ z_srlg (d, l, count); break;
      default: ShouldNotReachHere();
    }
    return;
  }
  if (dest->is_single_cpu()) {
    assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts");
    count = count & 0x1F; // Java spec
    switch (code) {
      case lir_shl:  __ z_sllg (dest->as_register(), left->as_register(), count); break;
      case lir_shr:  __ z_sra  (dest->as_register(), count); break;
      case lir_ushr: __ z_srl  (dest->as_register(), count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    Register l = left->as_pointer_register();
    Register d = dest->as_pointer_register();
    switch (code) {
      case lir_shl:  __ z_sllg (d, l, count); break;
      case lir_shr:  __ z_srag (d, l, count); break;
      case lir_ushr: __ z_srlg (d, l, count); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}
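
// Object allocation. When an init check is required, the klass'
// _init_state byte is compared against fully_initialized (CLI for short
// displacements, CLIY otherwise); any other state, including a klass
// currently being initialized by another thread, takes the slow path.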
void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    // Make sure klass is initialized & doesn't have finalizer.
    const int state_offset = in_bytes(InstanceKlass::init_state_offset());
    Register iklass = op->klass()->as_register();
    add_debug_info_for_null_check_here(op->stub()->info());
    if (Immediate::is_uimm12(state_offset)) {
      __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized);
    } else {
      __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
    }
    __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far.
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
      (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
    __ z_brul(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}
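
// Receiver type profiling. The ReceiverTypeData rows act as a small cache:
// the first loop bumps the counter of a row that already records this
// receiver klass, the second loop claims the first empty row for a klass
// seen for the first time. If both loops fall through, the caller accounts
// for the polymorphic case (see emit_profile_call).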
void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ z_cg(recv, receiver_addr);
    __ z_brne(next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
    __ branch_optimized(Assembler::bcondAlways, *update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in.
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ z_ltg(Z_R0_scratch, recv_addr);
    __ z_brne(next_test);
    __ z_stg(recv, recv_addr);
    __ load_const_optimized(tmp1, DataLayout::counter_increment);
    __ z_stg(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)), mdo);
    __ branch_optimized(Assembler::bcondAlways, *update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  Unimplemented();
}

void LIR_Assembler::store_parameter(Register r, int param_num) {
  assert(param_num >= 0, "invalid num");
  int offset_in_bytes = param_num * BytesPerWord + FrameMap::first_available_sp_in_frame;
  assert(offset_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ z_stg(r, offset_in_bytes, Z_SP);
}

void LIR_Assembler::store_parameter(jint c, int param_num) {
  assert(param_num >= 0, "invalid num");
  int offset_in_bytes = param_num * BytesPerWord + FrameMap::first_available_sp_in_frame;
  assert(offset_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ store_const(Address(Z_SP, offset_in_bytes), c, Z_R1_scratch, true);
}
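
// Shared type check logic for checkcast and instanceof. Control reaches
// 'success' if the object is a subtype of the given klass, 'failure'
// otherwise, and 'obj_is_null' for a null object. The callers wire these
// labels differently because null passes a checkcast but makes instanceof
// false (see emit_opTypeCheck below).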
void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // We always need a stub for the failure case.
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  Register Rtmp1 = Z_R1_scratch;
  ciKlass* k = op->klass();

  assert(!op->tmp3()->is_valid(), "tmp3's not needed");

  // Check if it needs to be profiled.
  ciMethodData* md = NULL;
  ciProfileData* data = NULL;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }

  // Temp operands do not overlap with inputs, if this is their last
  // use (end of range is exclusive), so a register conflict is possible.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->should_profile()) {
    NearLabel not_null;
    __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondNotEqual, not_null);
    // Object is null; update MDO and exit.
    Register mdo = klass_RInfo;
    metadata2reg(md->constant_encoding(), mdo);
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ or2mem_8(data_addr, header_bits);
    __ branch_optimized(Assembler::bcondAlways, *obj_is_null);
    __ bind(not_null);
  } else {
    __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondEqual, *obj_is_null);
  }

  NearLabel profile_cast_failure, profile_cast_success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;

  // Patching may screw with our temporaries on sparc,
  // so let's do it before loading the class.
  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }
  assert(obj != k_RInfo, "must be different");

  __ verify_oop(obj);

  // Get object class.
  // Not a safepoint as obj null check happens earlier.
  if (op->fast_check()) {
    if (UseCompressedClassPointers) {
      __ load_klass(klass_RInfo, obj);
      __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target);
    } else {
      __ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
      __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
    }
    // Successful cast, fall through to profile or jump.
  } else {
    bool need_slow_path = !k->is_loaded() ||
                          ((int) k->super_check_offset() == in_bytes(Klass::secondary_super_cache_offset()));
    intptr_t super_check_offset = k->is_loaded() ? k->super_check_offset() : -1L;
    __ load_klass(klass_RInfo, obj);
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1,
                                     (need_slow_path ? success_target : NULL),
                                     failure_target, NULL,
                                     RegisterOrConstant(super_check_offset));
    if (need_slow_path) {
      // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
      address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
      store_parameter(klass_RInfo, 0); // sub
      store_parameter(k_RInfo, 1);     // super
      emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
      CHECK_BAILOUT2(profile_cast_failure, profile_cast_success);
      __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
      // Fall through to success case.
    }
  }

  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    assert_different_registers(obj, mdo, recv);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    __ load_klass(recv, obj);
    type_profile_helper(mdo, md, data, recv, Rtmp1, success);
    __ branch_optimized(Assembler::bcondAlways, *success);

    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    __ add2mem_64(Address(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())), -(int)DataLayout::counter_increment, Rtmp1);
    __ branch_optimized(Assembler::bcondAlways, *failure);
  } else {
    __ branch_optimized(Assembler::bcondAlways, *success);
  }
}
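
// LIR_OpTypeCheck dispatch: lir_store_check verifies that a value stored
// into an object array is a subtype of the array's element klass;
// lir_checkcast and lir_instanceof are built on emit_typecheck_helper
// above and differ only in how the result register is set on the possible
// outcomes.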
void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = Z_R1_scratch;

    CodeStub* stub = op->stub();

    // Check if it needs to be profiled.
    ciMethodData* md = NULL;
    ciProfileData* data = NULL;

    assert_different_registers(value, k_RInfo, klass_RInfo);

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    NearLabel profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      NearLabel not_null;
      __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondNotEqual, not_null);
      // Object is null; update MDO and exit.
      Register mdo = klass_RInfo;
      metadata2reg(md->constant_encoding(), mdo);
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ or2mem_8(data_addr, header_bits);
      __ branch_optimized(Assembler::bcondAlways, done);
      __ bind(not_null);
    } else {
      __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondEqual, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // Get instance klass (it's already uncompressed).
    __ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
    address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
    store_parameter(klass_RInfo, 0); // sub
    store_parameter(k_RInfo, 1);     // super
    emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
    CHECK_BAILOUT3(profile_cast_success, profile_cast_failure, done);
    __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
    // Fall through to success case.

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      assert_different_registers(value, mdo, recv);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      __ load_klass(recv, value);
      type_profile_helper(mdo, md, data, recv, Rtmp1, &done);
      __ branch_optimized(Assembler::bcondAlways, done);

      __ bind(profile_cast_failure);
      metadata2reg(md->constant_encoding(), mdo);
      __ add2mem_64(Address(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())), -(int)DataLayout::counter_increment, Rtmp1);
      __ branch_optimized(Assembler::bcondAlways, *stub->entry());
    }

    __ bind(done);
  } else {
    if (code == lir_checkcast) {
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      NearLabel success;
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      __ lgr_if_needed(dst, obj);
    } else {
      if (code == lir_instanceof) {
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        NearLabel success, failure, done;
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ clear_reg(dst);
        __ branch_optimized(Assembler::bcondAlways, done);
        __ bind(success);
        __ load_const_optimized(dst, 1);
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }
    }
  }
}
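
// Compare-and-swap via CS/CSG: the instruction compares the first operand
// with the storage location and, if they are equal, stores the new value;
// otherwise the current storage contents are loaded into the first
// operand. Condition code 0 indicates success, which is what the LIR
// cmp-and-swap contract expects.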
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr = op->addr()->as_pointer_register();
  Register t1_cmp = Z_R1_scratch;
  if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register new_value_lo = op->new_value()->as_register_lo();
    __ z_lgr(t1_cmp, cmp_value_lo);
    // Perform the compare and swap operation.
    __ z_csg(t1_cmp, new_value_lo, 0, addr);
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmp_value = op->cmp_value()->as_register();
    Register new_value = op->new_value()->as_register();
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        t1_cmp = op->tmp1()->as_register();
        Register t2_new = op->tmp2()->as_register();
        assert_different_registers(cmp_value, new_value, addr, t1_cmp, t2_new);
        __ oop_encoder(t1_cmp, cmp_value, true /*maybe null*/);
        __ oop_encoder(t2_new, new_value, true /*maybe null*/);
        __ z_cs(t1_cmp, t2_new, 0, addr);
      } else {
        __ z_lgr(t1_cmp, cmp_value);
        __ z_csg(t1_cmp, new_value, 0, addr);
      }
    } else {
      __ z_lr(t1_cmp, cmp_value);
      __ z_cs(t1_cmp, new_value, 0, addr);
    }
  } else {
    ShouldNotReachHere(); // new lir_cas_??
  }
}

void LIR_Assembler::set_24bit_FPU() {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::reset_FPU() {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::breakpoint() {
  Unimplemented();
  // __ breakpoint_trap();
}

void LIR_Assembler::push(LIR_Opr opr) {
  ShouldNotCallThis(); // unused
}

void LIR_Assembler::pop(LIR_Opr opr) {
  ShouldNotCallThis(); // unused
}

void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address addr = frame_map()->address_for_monitor_lock(monitor_no);
  __ add2reg(dst_opr->as_register(), addr.disp(), addr.base());
}
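
// C1 stack locking: the BasicLock's displaced-header field sits at offset 0
// (asserted below), so the 'lock' register can serve directly as the
// address of the displaced header word in lock_object/unlock_object.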
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register(); // May not be an oop.
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ branch_optimized(Assembler::bcondAlways, *op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // Add debug info for NullPointerException only if one is possible.
    if (op->info() != NULL) {
      add_debug_info_for_null_check_here(op->info());
    }
    __ lock_object(hdr, obj, lock, *op->stub()->entry());
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    ShouldNotReachHere();
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types.
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
  metadata2reg(md->constant_encoding(), mdo);

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
          return;
        }
      }

      // Receiver type not found in profile data. Select an empty slot.

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ z_stg(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      NearLabel update_done;
      type_profile_helper(mdo, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1);
      __ bind(update_done);
    }
  } else {
    // static call
    __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1);
  }
}

void LIR_Assembler::align_backward_branch_target() {
  __ align(OptoLoopAlignment);
}

void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  ShouldNotCallThis(); // There are no delay slots on ZARCH_64.
}

void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ z_lcr(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ z_lcebr(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ z_lcdbr(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    __ z_lcgr(dest->as_register_lo(), left->as_register_lo());
  }
}

void LIR_Assembler::fxch(int i) {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::fld(int i) {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::ffree(int i) {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  emit_call_c(dest);
  CHECK_BAILOUT();
  if (info != NULL) {
    add_call_info_here(info);
  }
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotCallThis(); // not needed on ZARCH_64
}
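
// Memory barriers. z/Architecture is strongly ordered; only store-load
// reordering is architecturally visible. Accordingly, membar() and
// membar_storeload() emit the full fence, while the acquire/release
// flavors mainly keep the compiler from reordering accesses.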
void LIR_Assembler::membar() {
  __ z_fence();
}

void LIR_Assembler::membar_acquire() {
  __ z_acquire();
}

void LIR_Assembler::membar_release() {
  __ z_release();
}

void LIR_Assembler::membar_loadload() {
  __ z_acquire();
}

void LIR_Assembler::membar_storestore() {
  __ z_release();
}

void LIR_Assembler::membar_loadstore() {
  __ z_acquire();
}

void LIR_Assembler::membar_storeload() {
  __ z_fence();
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(patch_code == lir_patch_none, "Patch code not supported");
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "scaling unsupported");
  __ load_address(dest->as_pointer_register(), as_Address(addr));
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotCallThis(); // unused
}

#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif

void LIR_Assembler::peephole(LIR_List*) {
  // Do nothing for now.
}
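
// Atomic add via LAA/LAAG (load and add): the previous memory contents are
// returned in the first operand register while the sum is stored back as
// one interlocked update, so 'dest' receives the old value as required by
// lir_xadd.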
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(code == lir_xadd, "lir_xchg not supported");
  Address src_addr = as_Address(src->as_address_ptr());
  Register base = src_addr.base();
  intptr_t disp = src_addr.disp();
  if (src_addr.index()->is_valid()) {
    // LAA and LAAG do not support index register.
    __ load_address(Z_R1_scratch, src_addr);
    base = Z_R1_scratch;
    disp = 0;
  }
  if (data->type() == T_INT) {
    __ z_laa(dest->as_register(), data->as_register(), disp, base);
  } else if (data->type() == T_LONG) {
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    __ z_laag(dest->as_register_lo(), data->as_register_lo(), disp, base);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp1 = op->tmp()->as_pointer_register();
  Register tmp2 = Z_R1_scratch;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none, null_seen, init_klass;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (do_null || tmp1 != obj DEBUG_ONLY(|| true)) {
    __ z_ltgr(tmp1, obj);
  }
  if (do_null) {
    __ z_brnz(update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ z_lg(tmp1, mdo_addr);
      __ z_oill(tmp1, TypeEntries::null_seen);
      __ z_stg(tmp1, mdo_addr);
    }
    if (do_update) {
      __ z_bru(next);
    }
  } else {
    __ asm_assert_ne("unexpected null obj", __LINE__);
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      __ load_klass(tmp1, tmp1);
      metadata2reg(exact_klass->constant_encoding(), tmp2);
      __ z_cgr(tmp1, tmp2);
      __ asm_assert_eq("exact klass and actual klass differ", __LINE__);
    }
#endif

    Label do_update;
    __ z_lg(tmp2, mdo_addr);
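
    // Profile cell layout (see TypeEntries in methodData.hpp): a klass
    // pointer whose low-order bits are reused as flags (null_seen,
    // type_unknown). Masking with type_klass_mask strips the flags to
    // recover the klass; a zero value under type_mask means no type has
    // been recorded yet.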
    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          metadata2reg(exact_klass->constant_encoding(), tmp1);
        } else {
          __ load_klass(tmp1, tmp1);
        }

        // Klass seen before: nothing to do (regardless of unknown bit).
        __ z_lgr(Z_R0_scratch, tmp2);
        assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
        __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF);
        __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next);

        // Already unknown: Nothing to do anymore.
        __ z_tmll(tmp2, TypeEntries::type_unknown);
        __ z_brc(Assembler::bcondAllOne, next);

        if (TypeEntries::is_type_none(current_klass)) {
          __ z_lgr(Z_R0_scratch, tmp2);
          assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
          __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF);
          __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        // Already unknown: Nothing to do anymore.
        __ z_tmll(tmp2, TypeEntries::type_unknown);
        __ z_brc(Assembler::bcondAllOne, next);
      }

      // Different than before. Cannot keep accurate profile.
      __ z_oill(tmp2, TypeEntries::type_unknown);
      __ z_bru(do_update);
    } else {
      // There's a single possible klass at this profile point.
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        metadata2reg(exact_klass->constant_encoding(), tmp1);
        __ z_lgr(Z_R0_scratch, tmp2);
        assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
        __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF);
        __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next);
#ifdef ASSERT
        {
          Label ok;
          __ z_lgr(Z_R0_scratch, tmp2);
          assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
          __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF);
          __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, ok);
          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
#endif

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown: Nothing to do anymore.
        __ z_tmll(tmp2, TypeEntries::type_unknown);
        __ z_brc(Assembler::bcondAllOne, next);
        __ z_oill(tmp2, TypeEntries::type_unknown);
        __ z_bru(do_update);
      }
    }

    __ bind(init_klass);
    // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
    __ z_ogr(tmp2, tmp1);

    __ bind(do_update);
    __ z_stg(tmp2, mdo_addr);

    __ bind(next);
  }
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ load_const_optimized(res, StubRoutines::crc_table_addr());
  __ kernel_crc32_singleByteReg(crc, val, res, true);
  __ z_lgfr(res, crc);
}

#undef __