/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "nativeInst_s390.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_s390.inline.hpp"

#define __ _masm->

#ifndef PRODUCT
#undef __
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm) : _masm)->
#endif

//------------------------------------------------------------

bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  // Not used on ZARCH_64.
  ShouldNotCallThis();
  return false;
}

LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::Z_R2_oop_opr;
}

LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::Z_R2_opr;
}

int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}

// Inline cache check: done before the frame is built.
// The inline cached class is in Z_inline_cache (Z_R9).
// We fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  Register receiver = receiverOpr()->as_register();
  int offset = __ offset();
  __ inline_cache_check(receiver, Z_inline_cache);
  return offset;
}

void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation. The expression stack must be empty
  //      at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.
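  // Worked example (added, illustrative): with max_locals == 2 and a single
  // monitor, SharedRuntime::OSR_migration_begin() hands us a buffer laid out
  // as [ local[1] | local[0] | lock | oop ], one word per local and two words
  // per monitor. The monitor_offset computed below is then 2*8 + 0 = 16, so
  // the lock word of monitor 0 sits at byte 16 and its object oop at byte 24.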

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  //   locals[nlocals-1..0]
  //   monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   Z_R2: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  {
    assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
      // Verify the interpreter's monitor has a non-null object.
      __ asm_assert_mem8_isnot_zero(slot_offset + 1*BytesPerWord, OSR_buf, "locked object is NULL", __LINE__);
      // Copy the lock field into the compiled activation.
      __ z_lg(Z_R1_scratch, slot_offset + 0, OSR_buf);
      __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_lock(i));
      __ z_lg(Z_R1_scratch, slot_offset + 1*BytesPerWord, OSR_buf);
      __ z_stg(Z_R1_scratch, frame_map()->address_for_monitor_object(i));
    }
  }
}

// --------------------------------------------------------------------------------------------

address LIR_Assembler::emit_call_c(address a) {
  __ align_call_far_patchable(__ pc());
  address call_addr = __ call_c_opt(a);
  if (call_addr == NULL) {
    bailout("const section overflow");
  }
  return call_addr;
}

int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci. => Add a nop.
  // (was bug 5/14/1999 - gri)
  __ nop();

  // Generate code for exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());
  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();

  address a = Runtime1::entry_for (Runtime1::handle_exception_from_callee_id);
  address call_addr = emit_call_c(a);
  CHECK_BAILOUT_(-1);
  __ should_not_reach_here();
  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
#ifndef PRODUCT
  if (CommentedAssembly) {
    _masm->block_comment("Unwind handler");
  }
#endif

  int offset = code_offset();
  Register exception_oop_callee_saved = Z_R10; // Z_R10 is callee-saved.
  Register Rtmp1 = Z_R11;
  Register Rtmp2 = Z_R12;

  // Fetch the exception from TLS and clear out exception related thread state.
  Address exc_oop_addr = Address(Z_thread, JavaThread::exception_oop_offset());
  Address exc_pc_addr  = Address(Z_thread, JavaThread::exception_pc_offset());
  __ z_lg(Z_EXC_OOP, exc_oop_addr);
  __ clear_mem(exc_oop_addr, sizeof(oop));
  __ clear_mem(exc_pc_addr, sizeof(intptr_t));

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Z_EXC_OOP);
  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP); // Preserve the exception.
  }

  // Perform needed unlocking.
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    // Runtime1::monitorexit_id expects lock address in Z_R1_scratch.
    LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch);
    monitor_address(0, lock);
    stub = new MonitorExitStub(lock, true, 0);
    __ unlock_object(Rtmp1, Rtmp2, lock->as_register(), *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    ShouldNotReachHere(); // Not supported.
#if 0
    __ mov(rdi, r15_thread);
    __ mov_metadata(rsi, method()->constant_encoding());
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
#endif
  }

  if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
    __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved); // Restore the exception.
  }

  // Remove the activation and dispatch to the unwind handler.
  __ pop_frame();
  __ z_lg(Z_EXC_PC, _z_abi16(return_pc), Z_SP);

  // Z_EXC_OOP: exception oop
  // Z_EXC_PC: exception pc

  // Dispatch to the unwind logic.
  __ load_const_optimized(Z_R5, Runtime1::entry_for (Runtime1::unwind_exception_id));
  __ z_br(Z_R5);

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}

int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci. => Add a nop.
  // (was bug 5/14/1999 - gri)
  __ nop();

  // Generate code for the deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());
  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  // Size must be constant (see HandlerImpl::emit_deopt_handler).
  __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
  __ call(Z_R1_scratch);
  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ clear_reg(reg, true/*64bit*/, false/*set cc*/); // Must not kill cc set by cmove.
  } else {
    AddressLiteral a = __ allocate_oop_address(o);
    bool success = __ load_oop_from_toc(reg, a, reg);
    if (!success) {
      bailout("const section overflow");
    }
  }
}

void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(oop_index));
  assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  // The NULL will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
  __ load_const(reg, addrlit);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::metadata2reg(Metadata* md, Register reg) {
  bool success = __ set_metadata_constant(md, reg);
  if (!success) {
    bailout("const section overflow");
    return;
  }
}

void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);
  AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  // The NULL will be dynamically patched later so the sequence to
  // load the address literal must not be optimized.
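  // Note (added, illustrative): load_const() emits a fixed-length instruction
  // sequence, while load_const_optimized() may shrink the load for small
  // values. Since the patching code later rewrites the embedded constant in
  // place, the instruction layout must not depend on the placeholder value 0,
  // hence load_const() below.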
  __ load_const(reg, addrlit);

  patching_epilog(patch, lir_patch_normal, reg, info);
}

void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(),
                      op->in_opr1(),
                      op->in_opr2(),
                      op->in_opr3(),
                      op->result_opr(),
                      op->info());
      break;
    case lir_fmad: {
      const FloatRegister opr1 = op->in_opr1()->as_double_reg(),
                          opr2 = op->in_opr2()->as_double_reg(),
                          opr3 = op->in_opr3()->as_double_reg(),
                          res  = op->result_opr()->as_double_reg();
      __ z_madbr(opr3, opr1, opr2);
      if (res != opr3) { __ z_ldr(res, opr3); }
    } break;
    case lir_fmaf: {
      const FloatRegister opr1 = op->in_opr1()->as_float_reg(),
                          opr2 = op->in_opr2()->as_float_reg(),
                          opr3 = op->in_opr3()->as_float_reg(),
                          res  = op->result_opr()->as_float_reg();
      __ z_maebr(opr3, opr1, opr2);
      if (res != opr3) { __ z_ler(res, opr3); }
    } break;
    default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  { _branch_target_blocks.append(op->block()); }
  if (op->ublock() != NULL) { _branch_target_blocks.append(op->ublock()); }
#endif

  if (op->cond() == lir_cond_always) {
    if (op->info() != NULL) { add_debug_info_for_branch(op->info()); }
    __ branch_optimized(Assembler::bcondAlways, *(op->label()));
  } else {
    Assembler::branch_condition acond = Assembler::bcondZero;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      __ branch_optimized(Assembler::bcondNotOrdered, *(op->ublock()->label()));
    }
    switch (op->cond()) {
      case lir_cond_equal:        acond = Assembler::bcondEqual;    break;
      case lir_cond_notEqual:     acond = Assembler::bcondNotEqual; break;
      case lir_cond_less:         acond = Assembler::bcondLow;      break;
      case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;  break;
      case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;   break;
      case lir_cond_greater:      acond = Assembler::bcondHigh;     break;
      case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;  break;
      case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;   break;
      default:                    ShouldNotReachHere();
    }
    __ branch_optimized(acond, *(op->label()));
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  LIR_Opr src  = op->in_opr();
  LIR_Opr dest = op->result_opr();

  switch (op->bytecode()) {
    case Bytecodes::_i2l:
      __ move_reg_if_needed(dest->as_register_lo(), T_LONG, src->as_register(), T_INT);
      break;

    case Bytecodes::_l2i:
      __ move_reg_if_needed(dest->as_register(), T_INT, src->as_register_lo(), T_LONG);
      break;

    case Bytecodes::_i2b:
      __ move_reg_if_needed(dest->as_register(), T_BYTE, src->as_register(), T_INT);
      break;

    case Bytecodes::_i2c:
      __ move_reg_if_needed(dest->as_register(), T_CHAR, src->as_register(), T_INT);
      break;

    case Bytecodes::_i2s:
      __ move_reg_if_needed(dest->as_register(), T_SHORT, src->as_register(), T_INT);
      break;

    case Bytecodes::_f2d:
      assert(dest->is_double_fpu(), "check");
      __ move_freg_if_needed(dest->as_double_reg(), T_DOUBLE, src->as_float_reg(), T_FLOAT);
      break;

    case Bytecodes::_d2f:
      assert(dest->is_single_fpu(), "check");
      __ move_freg_if_needed(dest->as_float_reg(), T_FLOAT, src->as_double_reg(), T_DOUBLE);
      break;

    case Bytecodes::_i2f:
      __ z_cefbr(dest->as_float_reg(), src->as_register());
      break;

    case Bytecodes::_i2d:
      __ z_cdfbr(dest->as_double_reg(), src->as_register());
      break;

    case Bytecodes::_l2f:
      __ z_cegbr(dest->as_float_reg(), src->as_register_lo());
      break;

    case Bytecodes::_l2d:
      __ z_cdgbr(dest->as_double_reg(), src->as_register_lo());
      break;

    case Bytecodes::_f2i:
    case Bytecodes::_f2l: {
      Label done;
      FloatRegister Rsrc = src->as_float_reg();
      Register Rdst = (op->bytecode() == Bytecodes::_f2i ? dest->as_register() : dest->as_register_lo());
      __ clear_reg(Rdst, true, false);
      __ z_cebr(Rsrc, Rsrc);
      __ z_brno(done); // NaN -> 0
      if (op->bytecode() == Bytecodes::_f2i) {
        __ z_cfebr(Rdst, Rsrc, Assembler::to_zero);
      } else { // op->bytecode() == Bytecodes::_f2l
        __ z_cgebr(Rdst, Rsrc, Assembler::to_zero);
      }
      __ bind(done);
    }
    break;

    case Bytecodes::_d2i:
    case Bytecodes::_d2l: {
      Label done;
      FloatRegister Rsrc = src->as_double_reg();
      Register Rdst = (op->bytecode() == Bytecodes::_d2i ? dest->as_register() : dest->as_register_lo());
      __ clear_reg(Rdst, true, false); // Don't set CC.
      __ z_cdbr(Rsrc, Rsrc);
      __ z_brno(done); // NaN -> 0
      if (op->bytecode() == Bytecodes::_d2i) {
        __ z_cfdbr(Rdst, Rsrc, Assembler::to_zero);
      } else { // Bytecodes::_d2l
        __ z_cgdbr(Rdst, Rsrc, Assembler::to_zero);
      }
      __ bind(done);
    }
    break;

    default: ShouldNotReachHere();
  }
}

void LIR_Assembler::align_call(LIR_Code code) {
  // End of call instruction must be 4 byte aligned.
  int offset = __ offset();
  switch (code) {
    case lir_icvirtual_call:
      offset += MacroAssembler::load_const_from_toc_size();
      // no break
    case lir_static_call:
    case lir_optvirtual_call:
    case lir_dynamic_call:
      offset += NativeCall::call_far_pcrelative_displacement_offset;
      break;
    case lir_virtual_call: // currently, sparc-specific for niagara
    default: ShouldNotReachHere();
  }
  if ((offset & (NativeCall::call_far_pcrelative_displacement_alignment-1)) != 0) {
    __ nop();
  }
}

void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert((__ offset() + NativeCall::call_far_pcrelative_displacement_offset) % NativeCall::call_far_pcrelative_displacement_alignment == 0,
         "must be aligned (offset=%d)", __ offset());
  assert(rtype == relocInfo::none ||
         rtype == relocInfo::opt_virtual_call_type ||
         rtype == relocInfo::static_call_type, "unexpected rtype");
  // Prepend each BRASL with a nop.
  __ relocate(rtype);
  __ z_nop();
  __ z_brasl(Z_R14, op->addr());
  add_call_info(code_offset(), op->info());
}

void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  address virtual_call_oop_addr = NULL;
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  virtual_call_oop_addr = __ pc();
  bool success = __ load_const_from_toc(Z_inline_cache, empty_ic);
  if (!success) {
    bailout("const section overflow");
    return;
  }

  // CALL to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
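  // Note (added, illustrative): Universe::non_oop_word() is a sentinel that
  // can never equal a real oop or Klass pointer, so a freshly initialized
  // inline cache always misses: the first invocation takes the fixup path,
  // which resolves the callee and patches the cache.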
  __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
  call(op, relocInfo::none);
}

// not supported
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere();
}

void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
  if (from_reg != to_reg) __ z_lgr(to_reg, from_reg);
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  unsigned int lmem = 0;
  unsigned int lcon = 0;
  int64_t cbits = 0;
  Address dest_addr;
  switch (c->type()) {
    case T_INT:   // fall through
    case T_FLOAT:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      lmem = 4; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_ADDRESS:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      lmem = 8; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_OBJECT:
      dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
      if (c->as_jobject() == NULL) {
        __ store_const(dest_addr, (int64_t)NULL_WORD, 8, 8);
      } else {
        jobject2reg(c->as_jobject(), Z_R1_scratch);
        __ reg2mem_opt(Z_R1_scratch, dest_addr, true);
      }
      return;

    case T_LONG:  // fall through
    case T_DOUBLE:
      dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
      lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
      break;

    default:
      ShouldNotReachHere();
  }

  __ store_const(dest_addr, cbits, lmem, lcon);
}

void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_address(), "should not call otherwise");
  // See special case in LIRGenerator::do_StoreIndexed.
  // T_BYTE: Special case for card mark store.
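  // Note (added, illustrative): the card mark store is the GC post-barrier
  // writing a single 'dirty' byte at card_table_base + (address >> card_shift);
  // it is the only constant byte store emitted with an index register, hence
  // the exception permitted by the assert below.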
  assert(type == T_BYTE || !dest->as_address_ptr()->index()->is_valid(), "not supported");
  LIR_Const* c = src->as_constant_ptr();
  Address addr = as_Address(dest->as_address_ptr());

  int store_offset = -1;
  unsigned int lmem = 0;
  unsigned int lcon = 0;
  int64_t cbits = 0;
  switch (type) {
    case T_INT:   // fall through
    case T_FLOAT:
      lmem = 4; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_ADDRESS:
      lmem = 8; lcon = 4; cbits = c->as_jint_bits();
      break;

    case T_OBJECT:  // fall through
    case T_ARRAY:
      if (c->as_jobject() == NULL) {
        if (UseCompressedOops && !wide) {
          store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
        } else {
          store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8);
        }
      } else {
        jobject2reg(c->as_jobject(), Z_R1_scratch);
        if (UseCompressedOops && !wide) {
          __ encode_heap_oop(Z_R1_scratch);
          store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
        } else {
          store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
        }
      }
      assert(store_offset >= 0, "check");
      break;

    case T_LONG:  // fall through
    case T_DOUBLE:
      lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
      break;

    case T_BOOLEAN: // fall through
    case T_BYTE:
      lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint());
      break;

    case T_CHAR:  // fall through
    case T_SHORT:
      lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint());
      break;

    default:
      ShouldNotReachHere();
  };

  // Index register is normally not supported, but for
  // LIRGenerator::CardTableModRef_post_barrier we make an exception.
  if (type == T_BYTE && dest->as_address_ptr()->index()->is_valid()) {
    __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
    store_offset = __ offset();
    if (Immediate::is_uimm12(addr.disp())) {
      __ z_stc(Z_R0_scratch, addr);
    } else {
      __ z_stcy(Z_R0_scratch, addr);
    }
  }

  if (store_offset == -1) {
    store_offset = __ store_const(addr, cbits, lmem, lcon);
    assert(store_offset >= 0, "check");
  }

  if (info != NULL) {
    add_debug_info_for_null_check(store_offset, info);
  }
}

void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(src->is_constant(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");
  LIR_Const* c = src->as_constant_ptr();

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint());
      break;
    }

    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint());
      break;
    }

    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), (intptr_t)c->as_jlong());
      break;
    }

    case T_OBJECT: {
      if (patch_code != lir_patch_none) {
        jobject2reg_with_patching(dest->as_register(), info);
      } else {
        jobject2reg(c->as_jobject(), dest->as_register());
      }
      break;
    }

    case T_METADATA: {
      if (patch_code != lir_patch_none) {
        klass2reg_with_patching(dest->as_register(), info);
      } else {
        metadata2reg(c->as_metadata(), dest->as_register());
      }
      break;
    }

    case T_FLOAT: {
      Register toc_reg = Z_R1_scratch;
      __ load_toc(toc_reg);
      address const_addr = __ float_constant(c->as_jfloat());
      if (const_addr == NULL) {
        bailout("const section overflow");
        break;
      }
      int displ = const_addr - _masm->code()->consts()->start();
      if (dest->is_single_fpu()) {
        __ z_ley(dest->as_float_reg(), displ, toc_reg);
      } else {
        assert(dest->is_single_cpu(), "Must be a cpu register.");
        __ z_ly(dest->as_register(), displ, toc_reg);
      }
    }
    break;

    case T_DOUBLE: {
      Register toc_reg = Z_R1_scratch;
      __ load_toc(toc_reg);
      address const_addr = __ double_constant(c->as_jdouble());
      if (const_addr == NULL) {
        bailout("const section overflow");
        break;
      }
      int displ = const_addr - _masm->code()->consts()->start();
      if (dest->is_double_fpu()) {
        __ z_ldy(dest->as_double_reg(), displ, toc_reg);
      } else {
        assert(dest->is_double_cpu(), "Must be a long register.");
        __ z_lg(dest->as_register_lo(), displ, toc_reg);
      }
    }
    break;

    default:
      ShouldNotReachHere();
  }
}

Address LIR_Assembler::as_Address(LIR_Address* addr) {
  if (addr->base()->is_illegal()) {
    Unimplemented();
  }

  Register base = addr->base()->as_pointer_register();

  if (addr->index()->is_illegal()) {
    return Address(base, addr->disp());
  } else if (addr->index()->is_cpu_register()) {
    Register index = addr->index()->as_pointer_register();
    return Address(base, index, addr->disp());
  } else if (addr->index()->is_constant()) {
    intptr_t addr_offset = addr->index()->as_constant_ptr()->as_jint() + addr->disp();
    return Address(base, addr_offset);
  } else {
    ShouldNotReachHere();
    return Address();
  }
}

void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ mem2reg_opt(tmp, from, false);
      __ reg2mem_opt(tmp, to, false);
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ mem2reg_opt(tmp, from, true);
      __ reg2mem_opt(tmp, to, true);
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Register tmp = Z_R1_scratch;
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ mem2reg_opt(tmp, from, true);
      __ reg2mem_opt(tmp, to, true);
      break;
    }

    default:
      ShouldNotReachHere();
  }
}

// 4-byte accesses only! Don't use it to access 8 bytes!
Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  ShouldNotCallThis();
  return 0; // unused
}

// 4-byte accesses only! Don't use it to access 8 bytes!
Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  ShouldNotCallThis();
  return 0; // unused
}

void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code,
                            CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = Z_R0;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Immediate::is_simm20(disp_value)) {
      if (needs_patching) {
        __ load_const(Z_R1_scratch, (intptr_t)0);
      } else {
        __ load_const_optimized(Z_R1_scratch, disp_value);
      }
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
  } else {
    if (!Immediate::is_simm20(disp_value)) {
      __ load_const_optimized(Z_R1_scratch, disp_value);
      __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    } else {
      disp_reg = addr->index()->as_pointer_register();
    }
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();

  assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");

  bool short_disp = Immediate::is_uimm12(disp_value);

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ z_lb(dest->as_register(), disp_value, disp_reg, src); break;
    case T_CHAR  : __ z_llgh(dest->as_register(), disp_value, disp_reg, src); break;
    case T_SHORT :
      if (short_disp) {
        __ z_lh(dest->as_register(), disp_value, disp_reg, src);
      } else {
        __ z_lhy(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_INT   :
      if (short_disp) {
        __ z_l(dest->as_register(), disp_value, disp_reg, src);
      } else {
        __ z_ly(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
        __ decode_klass_not_null(dest->as_register());
      } else {
        __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
    {
      if (UseCompressedOops && !wide) {
        __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
        __ oop_decoder(dest->as_register(), dest->as_register(), true);
      } else {
        __ z_lg(dest->as_register(), disp_value, disp_reg, src);
      }
      break;
    }
    case T_FLOAT:
      if (short_disp) {
        __ z_le(dest->as_float_reg(), disp_value, disp_reg, src);
      } else {
        __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_DOUBLE:
      if (short_disp) {
        __ z_ld(dest->as_double_reg(), disp_value, disp_reg, src);
      } else {
        __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src);
      }
      break;
    case T_LONG  : __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
    default      : ShouldNotReachHere();
  }
  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(dest->as_register());
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL) add_debug_info_for_null_check(offset, info);
}

void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  assert(src->is_stack(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  if (dest->is_single_cpu()) {
    if (type == T_ARRAY || type == T_OBJECT) {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
      __ verify_oop(dest->as_register());
    } else if (type == T_METADATA) {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
    } else {
      __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false);
    }
  } else if (dest->is_double_cpu()) {
    Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix());
    __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true);
  } else if (dest->is_single_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
    __ mem2freg_opt(dest->as_float_reg(), src_addr, false);
  } else if (dest->is_double_fpu()) {
    Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
    __ mem2freg_opt(dest->as_double_reg(), src_addr, true);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  assert(src->is_register(), "should not call otherwise");
  assert(dest->is_stack(), "should not call otherwise");

  if (src->is_single_cpu()) {
    const Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
    if (type == T_OBJECT || type == T_ARRAY) {
      __ verify_oop(src->as_register());
      __ reg2mem_opt(src->as_register(), dst, true);
    } else if (type == T_METADATA) {
      __ reg2mem_opt(src->as_register(), dst, true);
    } else {
      __ reg2mem_opt(src->as_register(), dst, false);
    }
  } else if (src->is_double_cpu()) {
    Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix());
    __ reg2mem_opt(src->as_register_lo(), dstLO, true);
  } else if (src->is_single_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
    __ freg2mem_opt(src->as_float_reg(), dst_addr, false);
  } else if (src->is_double_fpu()) {
    Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
    __ freg2mem_opt(src->as_double_reg(), dst_addr, true);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ z_lgr(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ z_lgr(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}

void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest_opr->as_address_ptr();

  Register dest = addr->base()->as_pointer_register();
  Register disp_reg = Z_R0;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);

  if (addr->base()->is_oop_register()) {
    __ verify_oop(dest);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  assert(!needs_patching || (!Immediate::is_simm20(disp_value) && addr->index()->is_illegal()), "assumption");
  if (addr->index()->is_illegal()) {
    if (!Immediate::is_simm20(disp_value)) {
      if (needs_patching) {
        __ load_const(Z_R1_scratch, (intptr_t)0);
      } else {
        __ load_const_optimized(Z_R1_scratch, disp_value);
      }
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    }
  } else {
    if (!Immediate::is_simm20(disp_value)) {
      __ load_const_optimized(Z_R1_scratch, disp_value);
      __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
      disp_reg = Z_R1_scratch;
      disp_value = 0;
    } else {
      disp_reg = addr->index()->as_pointer_register();
    }
  }

  assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");

  if (type == T_ARRAY || type == T_OBJECT) {
    __ verify_oop(from->as_register());
  }

  bool short_disp = Immediate::is_uimm12(disp_value);

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  :
      if (short_disp) {
        __ z_stc(from->as_register(), disp_value, disp_reg, dest);
      } else {
        __ z_stcy(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_CHAR  : // fall through
    case T_SHORT :
      if (short_disp) {
        __ z_sth(from->as_register(), disp_value, disp_reg, dest);
      } else {
        __ z_sthy(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_INT   :
      if (short_disp) {
        __ z_st(from->as_register(), disp_value, disp_reg, dest);
      } else {
        __ z_sty(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    case T_LONG  : __ z_stg(from->as_register_lo(), disp_value, disp_reg, dest); break;
    case T_ADDRESS: __ z_stg(from->as_register(), disp_value, disp_reg, dest); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
    {
      if (UseCompressedOops && !wide) {
        Register compressed_src = Z_R14;
        __ oop_encoder(compressed_src, from->as_register(), true, (disp_reg != Z_R1) ? Z_R1 : Z_R0, -1, true);
        offset = code_offset();
        if (short_disp) {
          __ z_st(compressed_src, disp_value, disp_reg, dest);
        } else {
          __ z_sty(compressed_src, disp_value, disp_reg, dest);
        }
      } else {
        __ z_stg(from->as_register(), disp_value, disp_reg, dest);
      }
      break;
    }
    case T_FLOAT :
      if (short_disp) {
        __ z_ste(from->as_float_reg(), disp_value, disp_reg, dest);
      } else {
        __ z_stey(from->as_float_reg(), disp_value, disp_reg, dest);
      }
      break;
    case T_DOUBLE:
      if (short_disp) {
        __ z_std(from->as_double_reg(), disp_value, disp_reg, dest);
      } else {
        __ z_stdy(from->as_double_reg(), disp_value, disp_reg, dest);
      }
      break;
    default: ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, dest, info);
  }

  if (info != NULL) add_debug_info_for_null_check(offset, info);
}


void LIR_Assembler::return_op(LIR_Opr result) {
  assert(result->is_illegal() ||
         (result->is_single_cpu() && result->as_register() == Z_R2) ||
         (result->is_double_cpu() && result->as_register_lo() == Z_R2) ||
         (result->is_single_fpu() && result->as_float_reg() == Z_F0) ||
         (result->is_double_fpu() && result->as_double_reg() == Z_F0), "convention");

  if (SafepointMechanism::uses_thread_local_poll()) {
    __ z_lg(Z_R1_scratch, Address(Z_thread, Thread::polling_page_offset()));
  } else {
    AddressLiteral pp(os::get_polling_page());
    __ load_const_optimized(Z_R1_scratch, pp);
  }

  // Pop the frame before the safepoint code.
  __ pop_frame_restore_retPC(initial_frame_size_in_bytes());

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(Z_R14);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(Z_R1_scratch);

  __ z_br(Z_R14); // Return to caller.
}

int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register_lo();
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ z_lg(poll_addr, Address(Z_thread, Thread::polling_page_offset()));
  } else {
    AddressLiteral pp(os::get_polling_page());
    __ load_const_optimized(poll_addr, pp);
  }
  guarantee(info != NULL, "Shouldn't be NULL");
  add_debug_info_for_branch(info);
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);
  return offset;
}

void LIR_Assembler::emit_static_call_stub() {

  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.

  address call_pc = __ pc();
  address stub = __ start_a_stub(call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  int start = __ offset();

  __ relocate(static_stub_Relocation::spec(call_pc));

  // See also Matcher::interpreter_method_oop_reg().
  AddressLiteral meta = __ allocate_metadata_address(NULL);
  bool success = __ load_const_from_toc(Z_method, meta);

  __ set_inst_mark();
  AddressLiteral a((address)-1);
  success = success && __ load_const_from_toc(Z_R1, a);
  if (!success) {
    bailout("const section overflow");
    return;
  }

  __ z_br(Z_R1);
  assert(__ offset() - start <= call_stub_size(), "stub too big");
  __ end_a_stub(); // Update current stubs pointer and restore insts_end.
}

void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual;
  if (opr1->is_single_cpu()) {
    Register reg1 = opr1->as_register();
    if (opr2->is_single_cpu()) {
      // cpu register - cpu register
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ z_clgr(reg1, opr2->as_register());
      } else {
        assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
        if (unsigned_comp) {
          __ z_clr(reg1, opr2->as_register());
        } else {
          __ z_cr(reg1, opr2->as_register());
        }
      }
    } else if (opr2->is_stack()) {
      // cpu register - stack
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
      } else {
        if (unsigned_comp) {
          __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        } else {
          __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
        }
      }
    } else if (opr2->is_constant()) {
      // cpu register - constant
      LIR_Const* c = opr2->as_constant_ptr();
      if (c->type() == T_INT) {
        if (unsigned_comp) {
          __ z_clfi(reg1, c->as_jint());
        } else {
          __ z_cfi(reg1, c->as_jint());
        }
      } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
        // In 64 bit, oops are single register.
        jobject o = c->as_jobject();
        if (o == NULL) {
          __ z_ltgr(reg1, reg1);
        } else {
          jobject2reg(o, Z_R1_scratch);
          __ z_cgr(reg1, Z_R1_scratch);
        }
      } else {
        fatal("unexpected type: %s", basictype_to_str(c->type()));
      }
      // cpu register - address
    } else if (opr2->is_address()) {
      if (op->info() != NULL) {
        add_debug_info_for_null_check_here(op->info());
      }
      if (unsigned_comp) {
        __ z_cly(reg1, as_Address(opr2->as_address_ptr()));
      } else {
        __ z_cy(reg1, as_Address(opr2->as_address_ptr()));
      }
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_double_cpu()) {
    assert(!unsigned_comp, "unexpected");
    Register xlo = opr1->as_register_lo();
    Register xhi = opr1->as_register_hi();
    if (opr2->is_double_cpu()) {
      __ z_cgr(xlo, opr2->as_register_lo());
    } else if (opr2->is_constant()) {
      // cpu register - constant 0
      assert(opr2->as_jlong() == (jlong)0, "only handles zero");
      __ z_ltgr(xlo, xlo);
    } else {
      ShouldNotReachHere();
    }

  } else if (opr1->is_single_fpu()) {
    if (opr2->is_single_fpu()) {
      __ z_cebr(opr1->as_float_reg(), opr2->as_float_reg());
    } else {
      // stack slot
      Address addr = frame_map()->address_for_slot(opr2->single_stack_ix());
      if (Immediate::is_uimm12(addr.disp())) {
        __ z_ceb(opr1->as_float_reg(), addr);
      } else {
        __ z_ley(Z_fscratch_1, addr);
        __ z_cebr(opr1->as_float_reg(), Z_fscratch_1);
      }
    }
  } else if (opr1->is_double_fpu()) {
    if (opr2->is_double_fpu()) {
      __ z_cdbr(opr1->as_double_reg(), opr2->as_double_reg());
    } else {
      // stack slot
      Address addr = frame_map()->address_for_slot(opr2->double_stack_ix());
      if (Immediate::is_uimm12(addr.disp())) {
        __ z_cdb(opr1->as_double_reg(), addr);
      } else {
        __ z_ldy(Z_fscratch_1, addr);
        __ z_cdbr(opr1->as_double_reg(), Z_fscratch_1);
      }
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  Label done;
  Register dreg = dst->as_register();

  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    assert((left->is_single_fpu() && right->is_single_fpu()) ||
           (left->is_double_fpu() && right->is_double_fpu()), "unexpected operand types");
    bool is_single = left->is_single_fpu();
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    FloatRegister lreg = is_single ? left->as_float_reg() : left->as_double_reg();
    FloatRegister rreg = is_single ? right->as_float_reg() : right->as_double_reg();
    if (is_single) {
      __ z_cebr(lreg, rreg);
    } else {
      __ z_cdbr(lreg, rreg);
    }
    if (VM_Version::has_LoadStoreConditional()) {
      Register one       = Z_R0_scratch;
      Register minus_one = Z_R1_scratch;
      __ z_lghi(minus_one, -1);
      __ z_lghi(one, 1);
      __ z_lghi(dreg, 0);
      __ z_locgr(dreg, one, is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered);
      __ z_locgr(dreg, minus_one, is_unordered_less ? Assembler::bcondLowOrNotOrdered : Assembler::bcondLow);
    } else {
      __ clear_reg(dreg, true, false);
      __ z_bre(done); // if (left == right) dst = 0

      // if (left > right || ((code ~= cmpg) && (left <> right)) dst := 1
      __ z_lhi(dreg, 1);
      __ z_brc(is_unordered_less ? Assembler::bcondHigh : Assembler::bcondHighOrNotOrdered, done);

      // if (left < right || ((code ~= cmpl) && (left <> right)) dst := -1
      __ z_lhi(dreg, -1);
    }
  } else {
    assert(code == lir_cmp_l2i, "check");
    if (VM_Version::has_LoadStoreConditional()) {
      Register one       = Z_R0_scratch;
      Register minus_one = Z_R1_scratch;
      __ z_cgr(left->as_register_lo(), right->as_register_lo());
      __ z_lghi(minus_one, -1);
      __ z_lghi(one, 1);
      __ z_lghi(dreg, 0);
      __ z_locgr(dreg, one, Assembler::bcondHigh);
      __ z_locgr(dreg, minus_one, Assembler::bcondLow);
    } else {
      __ z_cgr(left->as_register_lo(), right->as_register_lo());
      __ z_lghi(dreg,  0); // eq value
      __ z_bre(done);
      __ z_lghi(dreg,  1); // gt value
      __ z_brh(done);
      __ z_lghi(dreg, -1); // lt value
    }
  }
  __ bind(done);
}

// result = condition ? opr1 : opr2
void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  Assembler::branch_condition acond = Assembler::bcondEqual, ncond = Assembler::bcondNotEqual;
  switch (condition) {
    case lir_cond_equal:        acond = Assembler::bcondEqual;    ncond = Assembler::bcondNotEqual; break;
    case lir_cond_notEqual:     acond = Assembler::bcondNotEqual; ncond = Assembler::bcondEqual;    break;
    case lir_cond_less:         acond = Assembler::bcondLow;      ncond = Assembler::bcondNotLow;   break;
    case lir_cond_lessEqual:    acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
    case lir_cond_greaterEqual: acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
    case lir_cond_greater:      acond = Assembler::bcondHigh;     ncond = Assembler::bcondNotHigh;  break;
    case lir_cond_belowEqual:   acond = Assembler::bcondNotHigh;  ncond = Assembler::bcondHigh;     break;
    case lir_cond_aboveEqual:   acond = Assembler::bcondNotLow;   ncond = Assembler::bcondLow;      break;
    default:                    ShouldNotReachHere();
  }

  if (opr1->is_cpu_register()) {
    reg2reg(opr1, result);
  } else if (opr1->is_stack()) {
    stack2reg(opr1, result, result->type());
  } else if (opr1->is_constant()) {
    const2reg(opr1, result, lir_patch_none, NULL);
  } else {
    ShouldNotReachHere();
  }

  if (VM_Version::has_LoadStoreConditional() && !opr2->is_constant()) {
    // Optimized version that does not require a branch.
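    // Note (added, illustrative): opr1 has already been moved into result
    // above; the LOAD ON CONDITION instructions below overwrite result with
    // opr2 only if the negated condition (ncond) holds, so no branch is
    // needed. The asserts guard that the opr1 move did not clobber opr2.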
    if (opr2->is_single_cpu()) {
      assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
      __ z_locgr(result->as_register(), opr2->as_register(), ncond);
    } else if (opr2->is_double_cpu()) {
      assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
      __ z_locgr(result->as_register_lo(), opr2->as_register_lo(), ncond);
    } else if (opr2->is_single_stack()) {
      __ z_loc(result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()), ncond);
    } else if (opr2->is_double_stack()) {
      __ z_locg(result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix()), ncond);
    } else {
      ShouldNotReachHere();
    }
  } else {
    Label skip;
    __ z_brc(acond, skip);
    if (opr2->is_cpu_register()) {
      reg2reg(opr2, result);
    } else if (opr2->is_stack()) {
      stack2reg(opr2, result, result->type());
    } else if (opr2->is_constant()) {
      const2reg(opr2, result, lir_patch_none, NULL);
    } else {
      ShouldNotReachHere();
    }
    __ bind(skip);
  }
}

void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "should never be used, idiv/irem and ldiv/lrem not handled by this method");

  if (left->is_single_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg = left->as_register();

    if (right->is_single_cpu()) {
      // cpu register - cpu register
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ z_ar (lreg, rreg); break;
        case lir_sub: __ z_sr (lreg, rreg); break;
        case lir_mul: __ z_msr(lreg, rreg); break;
        default: ShouldNotReachHere();
      }

    } else if (right->is_stack()) {
      // cpu register - stack
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_add: __ z_ay(lreg, raddr); break;
        case lir_sub: __ z_sy(lreg, raddr); break;
        default: ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jint c = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_add: __ z_agfi(lreg, c);  break;
        case lir_sub: __ z_agfi(lreg, -c); break; // note: -min_jint == min_jint
        case lir_mul: __ z_msfi(lreg, c);  break;
        default: ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_double_cpu()) {
    assert(left == dest, "left and dest must be equal");
    Register lreg_lo = left->as_register_lo();
    Register lreg_hi = left->as_register_hi();

    if (right->is_double_cpu()) {
      // cpu register - cpu register
      Register rreg_lo = right->as_register_lo();
      Register rreg_hi = right->as_register_hi();
      assert_different_registers(lreg_lo, rreg_lo);
      switch (code) {
        case lir_add:
          __ z_agr(lreg_lo, rreg_lo);
          break;
        case lir_sub:
          __ z_sgr(lreg_lo, rreg_lo);
          break;
        case lir_mul:
          __ z_msgr(lreg_lo, rreg_lo);
          break;
        default:
          ShouldNotReachHere();
      }

    } else if (right->is_constant()) {
      // cpu register - constant
      jlong c = right->as_constant_ptr()->as_jlong_bits();
      switch (code) {
        case lir_add: __ z_agfi(lreg_lo, c); break;
        case lir_sub:
          if (c != min_jint) {
            __ z_agfi(lreg_lo, -c);
          } else {
            // -min_jint cannot be represented as simm32 in z_agfi
            // min_jint sign extended:      0xffffffff80000000
            // -min_jint as 64 bit integer: 0x0000000080000000
            // 0x80000000 can be represented as uimm32 in z_algfi
            // lreg_lo := lreg_lo + -min_jint == lreg_lo + 0x80000000
            __ z_algfi(lreg_lo, UCONST64(0x80000000));
          }
          break;
        case lir_mul: __ z_msgfi(lreg_lo, c); break;
        default:
          ShouldNotReachHere();
      }

    } else {
      ShouldNotReachHere();
    }

  } else if (left->is_single_fpu()) {
    assert(left == dest, "left and dest must be equal");
    FloatRegister lreg = left->as_float_reg();
    FloatRegister rreg = right->is_single_fpu() ? right->as_float_reg() : fnoreg;
    Address raddr;

    if (rreg == fnoreg) {
      assert(right->is_single_stack(), "constants should be loaded into register");
      raddr = frame_map()->address_for_slot(right->single_stack_ix());
      if (!Immediate::is_uimm12(raddr.disp())) {
        __ mem2freg_opt(rreg = Z_fscratch_1, raddr, false);
      }
    }

    if (rreg != fnoreg) {
      switch (code) {
        case lir_add: __ z_aebr(lreg, rreg);  break;
        case lir_sub: __ z_sebr(lreg, rreg);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ z_meebr(lreg, rreg); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ z_debr(lreg, rreg);  break;
        default: ShouldNotReachHere();
      }
    } else {
      switch (code) {
        case lir_add: __ z_aeb(lreg, raddr);  break;
        case lir_sub: __ z_seb(lreg, raddr);  break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ z_meeb(lreg, raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ z_deb(lreg, raddr);  break;
        default: ShouldNotReachHere();
      }
    }
  } else if (left->is_double_fpu()) {
    assert(left == dest, "left and dest must be equal");
    FloatRegister lreg = left->as_double_reg();
    FloatRegister rreg = right->is_double_fpu() ? right->as_double_reg() : fnoreg;
    Address raddr;

    if (rreg == fnoreg) {
      assert(right->is_double_stack(), "constants should be loaded into register");
      raddr = frame_map()->address_for_slot(right->double_stack_ix());
      if (!Immediate::is_uimm12(raddr.disp())) {
        __ mem2freg_opt(rreg = Z_fscratch_1, raddr, true);
      }
    }

    if (rreg != fnoreg) {
      switch (code) {
        case lir_add: __ z_adbr(lreg, rreg); break;
        case lir_sub: __ z_sdbr(lreg, rreg); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ z_mdbr(lreg, rreg); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ z_ddbr(lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    } else {
      switch (code) {
        case lir_add: __ z_adb(lreg, raddr); break;
        case lir_sub: __ z_sdb(lreg, raddr); break;
        case lir_mul_strictfp: // fall through
        case lir_mul: __ z_mdb(lreg, raddr); break;
        case lir_div_strictfp: // fall through
        case lir_div: __ z_ddb(lreg, raddr); break;
        default: ShouldNotReachHere();
      }
    }
  } else if (left->is_address()) {
    assert(left == dest, "left and dest must be equal");
    assert(code == lir_add, "unsupported operation");
    assert(right->is_constant(), "unsupported operand");
    jint c = right->as_constant_ptr()->as_jint();
    LIR_Address* lir_addr = left->as_address_ptr();
    Address addr = as_Address(lir_addr);
    switch (lir_addr->type()) {
      case T_INT:
        __ add2mem_32(addr, c, Z_R1_scratch);
        break;
      case T_LONG:
        __ add2mem_64(addr, c, Z_R1_scratch);
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::fpop() {
  // do nothing
}

void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_sqrt: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ z_sqdbr(dst_reg, src_reg);
      break;
    }
    case lir_abs: {
      assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
      FloatRegister src_reg = value->as_double_reg();
      FloatRegister dst_reg = dest->as_double_reg();
      __ z_lpdbr(dst_reg, src_reg);
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}

void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
  if (left->is_single_cpu()) {
    Register reg = left->as_register();
    if (right->is_constant()) {
      int val = right->as_constant_ptr()->as_jint();
      switch (code) {
        case lir_logic_and: __ z_nilf(reg, val); break;
        case lir_logic_or:  __ z_oilf(reg, val); break;
        case lir_logic_xor: __ z_xilf(reg, val); break;
        default: ShouldNotReachHere();
      }
    } else if (right->is_stack()) {
      Address raddr = frame_map()->address_for_slot(right->single_stack_ix());
      switch (code) {
        case lir_logic_and: __ z_ny(reg, raddr); break;
        case lir_logic_or:  __ z_oy(reg, raddr); break;
        case lir_logic_xor: __ z_xy(reg, raddr); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register rright = right->as_register();
      switch (code) {
        case lir_logic_and: __ z_nr(reg, rright); break;
        case lir_logic_or : __ z_or(reg, rright); break;
        case lir_logic_xor: __ z_xr(reg, rright); break;
        default: ShouldNotReachHere();
      }
    }
    move_regs(reg, dst->as_register());
  } else {
    Register l_lo = left->as_register_lo();
    if (right->is_constant()) {
      __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong());
      switch (code) {
        case lir_logic_and:
          __ z_ngr(l_lo, Z_R1_scratch);
          break;
        case lir_logic_or:
          __ z_ogr(l_lo, Z_R1_scratch);
          break;
        case lir_logic_xor:
          __ z_xgr(l_lo, Z_R1_scratch);
          break;
        default: ShouldNotReachHere();
      }
    } else {
      Register r_lo;
      if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
        r_lo = right->as_register();
      } else {
        r_lo = right->as_register_lo();
      }
      switch (code) {
        case lir_logic_and:
          __ z_ngr(l_lo, r_lo);
          break;
        case lir_logic_or:
          __ z_ogr(l_lo, r_lo);
          break;
        case lir_logic_xor:
          __ z_xgr(l_lo, r_lo);
          break;
        default: ShouldNotReachHere();
      }
    }

    Register dst_lo = dst->as_register_lo();

    move_regs(l_lo, dst_lo);
  }
}

// See operand selection in LIRGenerator::do_ArithmeticOp_Int().
void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  if (left->is_double_cpu()) {
    // 64 bit integer case
    assert(left->is_double_cpu(), "left must be register");
    assert(right->is_double_cpu() || is_power_of_2_long(right->as_jlong()),
           "right must be register or power of 2 constant");
    assert(result->is_double_cpu(), "result must be register");

    Register lreg = left->as_register_lo();
    Register dreg = result->as_register_lo();

    if (right->is_constant()) {
      // Convert division by a power of two into some shifts and logical operations.
      Register treg1 = Z_R0_scratch;
      Register treg2 = Z_R1_scratch;
      jlong divisor = right->as_jlong();
      jlong log_divisor = log2_long(right->as_jlong());

      if (divisor == min_jlong) {
        // min_jlong is special. Result is '0' except for min_jlong/min_jlong = 1.
        if (dreg == lreg) {
          NearLabel done;
          __ load_const_optimized(treg2, min_jlong);
          __ z_cgr(lreg, treg2);
          __ z_lghi(dreg, 0); // Preserves condition code.
          __ z_brne(done);
          __ z_lghi(dreg, 1); // min_jlong / min_jlong = 1
          __ bind(done);
        } else {
          assert_different_registers(dreg, lreg);
          NearLabel done;
          __ z_lghi(dreg, 0);
          __ compare64_and_branch(lreg, min_jlong, Assembler::bcondNotEqual, done);
          __ z_lghi(dreg, 1);
          __ bind(done);
        }
        return;
      }
      __ move_reg_if_needed(dreg, T_LONG, lreg, T_LONG);
      if (divisor == 2) {
        __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0
      } else {
        __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0
        __ and_imm(treg2, divisor - 1, treg1, true);
      }
      if (code == lir_idiv) {
        __ z_agr(dreg, treg2);
        __ z_srag(dreg, dreg, log_divisor);
      } else {
        assert(code == lir_irem, "check");
        __ z_agr(treg2, dreg);
        __ and_imm(treg2, ~(divisor - 1), treg1, true);
        __ z_sgr(dreg, treg2);
      }
      return;
    }

    // Divisor is not a power of 2 constant.
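    // Explanatory note on the code below: it relies on the z/Architecture
    // DSGR convention that the dividend lives in the odd register of an
    // even/odd pair (Z_R11 here; R1 = lreg->predecessor() names the even
    // partner Z_R10) and that DSGR leaves the remainder in the even and
    // the quotient in the odd register. A divisor of -1 is special-cased,
    // presumably because min_jlong / -1 would raise a fixed-point-divide
    // exception: for lir_idiv the dividend is pre-negated (the negation is
    // reverted once the divisor turns out not to be -1), and for lir_irem
    // the result is simply 0.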
    Register rreg = right->as_register_lo();
    Register treg = temp->as_register_lo();
    assert(right->is_double_cpu(), "right must be register");
    assert(lreg == Z_R11, "see ldivInOpr()");
    assert(rreg != lreg, "right register must not be same as left register");
    assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10) ||
           (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see ldivInOpr(), ldivOutOpr(), lremOutOpr()");

    Register R1 = lreg->predecessor();
    Register R2 = rreg;
    assert(code != lir_idiv || lreg == dreg, "see code below");
    if (code == lir_idiv) {
      __ z_lcgr(lreg, lreg);
    } else {
      __ clear_reg(dreg, true, false);
    }
    NearLabel done;
    __ compare64_and_branch(R2, -1, Assembler::bcondEqual, done);
    if (code == lir_idiv) {
      __ z_lcgr(lreg, lreg); // Revert lcgr above.
    }
    if (ImplicitDiv0Checks) {
      // With implicit checks the divide instruction itself may trap, so
      // record the debug info here. Without them no debug info is needed:
      // the divide won't trap, and add_debug_info_for_div0 would just
      // instantiate another (unnecessary) DivByZeroStub.
      add_debug_info_for_div0(__ offset(), info);
    }
    __ z_dsgr(R1, R2);
    __ bind(done);
    return;
  }

  // 32 bit integer case

  assert(left->is_single_cpu(), "left must be register");
  assert(right->is_single_cpu() || is_power_of_2(right->as_jint()), "right must be register or power of 2 constant");
  assert(result->is_single_cpu(), "result must be register");

  Register lreg = left->as_register();
  Register dreg = result->as_register();

  if (right->is_constant()) {
    // Convert division by a power of two into some shifts and logical operations.
    Register treg1 = Z_R0_scratch;
    Register treg2 = Z_R1_scratch;
    jlong divisor = right->as_jint();
    jlong log_divisor = log2_long(right->as_jint());
    __ move_reg_if_needed(dreg, T_LONG, lreg, T_INT); // sign extend
    if (divisor == 2) {
      __ z_srlg(treg2, dreg, 63); // dividend < 0 ? 1 : 0
    } else {
      __ z_srag(treg2, dreg, 63); // dividend < 0 ? -1 : 0
      __ and_imm(treg2, divisor - 1, treg1, true);
    }
    if (code == lir_idiv) {
      __ z_agr(dreg, treg2);
      __ z_srag(dreg, dreg, log_divisor);
    } else {
      assert(code == lir_irem, "check");
      __ z_agr(treg2, dreg);
      __ and_imm(treg2, ~(divisor - 1), treg1, true);
      __ z_sgr(dreg, treg2);
    }
    return;
  }

  // Divisor is not a power of 2 constant.
  Register rreg = right->as_register();
  Register treg = temp->as_register();
  assert(right->is_single_cpu(), "right must be register");
  assert(lreg == Z_R11, "see divInOpr()");
  assert(rreg != lreg, "right register must not be same as left register");
  assert((code == lir_idiv && dreg == Z_R11 && treg == Z_R10)
         || (code == lir_irem && dreg == Z_R10 && treg == Z_R11), "see divInOpr(), divOutOpr(), remOutOpr()");

  Register R1 = lreg->predecessor();
  Register R2 = rreg;
  __ move_reg_if_needed(lreg, T_LONG, lreg, T_INT); // sign extend
  if (ImplicitDiv0Checks) {
    // With implicit checks the divide instruction itself may trap, so
    // record the debug info here. Without them no debug info is needed:
    // the divide won't trap, and add_debug_info_for_div0 would just
    // instantiate another (unnecessary) DivByZeroStub.
    add_debug_info_for_div0(__ offset(), info);
  }
  __ z_dsgfr(R1, R2);
}

void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");
  assert(exceptionPC->as_register() == Z_EXC_PC, "should match");

  // Exception object is not added to oop map by LinearScan
  // (LinearScan assumes that no oops are in fixed registers).
  info->add_register_oop(exceptionOop);

  // Reuse the debug info from the safepoint poll for the throw op itself.
  __ get_PC(Z_EXC_PC);
  add_call_info(__ offset(), info); // for exception handler
  address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? Runtime1::handle_exception_id
                                                                    : Runtime1::handle_exception_nofpu_id);
  emit_call_c(stub);
}

void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  assert(exceptionOop->as_register() == Z_EXC_OOP, "should match");

  __ branch_optimized(Assembler::bcondAlways, _unwind_handler_entry);
}

void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // If we don't know anything, just go through the generic arraycopy.
  if (default_type == NULL) {
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    if (copyfunc_addr == NULL) {
      // Take a slow path for generic arraycopy.
      __ branch_optimized(Assembler::bcondAlways, *stub->entry());
      __ bind(*stub->continuation());
      return;
    }

    Label done;
    // Save outgoing arguments in callee saved registers (C convention) in case
    // a call to System.arraycopy is needed.
    Register callee_saved_src = Z_R10;
    Register callee_saved_src_pos = Z_R11;
    Register callee_saved_dst = Z_R12;
    Register callee_saved_dst_pos = Z_R13;
    Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.

    __ lgr_if_needed(callee_saved_src, src);
    __ lgr_if_needed(callee_saved_src_pos, src_pos);
    __ lgr_if_needed(callee_saved_dst, dst);
    __ lgr_if_needed(callee_saved_dst_pos, dst_pos);
    __ lgr_if_needed(callee_saved_length, length);

    // C function requires 64 bit values.
    __ z_lgfr(src_pos, src_pos);
    __ z_lgfr(dst_pos, dst_pos);
    __ z_lgfr(length, length);

    // Pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint.

    // The arguments are in the corresponding registers.
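    // A note on the return value (inferred from the adjustment code below,
    // not from a contract stated in this file): the generic arraycopy stub
    // returns 0 when it copied everything; otherwise it returns the bitwise
    // complement of the number of elements already copied. z_xilf(tmp, -1)
    // recovers that count so length can be reduced and src_pos/dst_pos
    // advanced before falling back to the slow stub, which then copies only
    // the remaining elements.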
    assert(Z_ARG1 == src, "assumption");
    assert(Z_ARG2 == src_pos, "assumption");
    assert(Z_ARG3 == dst, "assumption");
    assert(Z_ARG4 == dst_pos, "assumption");
    assert(Z_ARG5 == length, "assumption");
#ifndef PRODUCT
    if (PrintC1Statistics) {
      __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt);
      __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
    }
#endif
    emit_call_c(copyfunc_addr);
    CHECK_BAILOUT();

    __ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation());

    __ z_lgr(tmp, Z_RET);
    __ z_xilf(tmp, -1);

    // Restore values from callee saved registers so they are where the stub
    // expects them.
    __ lgr_if_needed(src, callee_saved_src);
    __ lgr_if_needed(src_pos, callee_saved_src_pos);
    __ lgr_if_needed(dst, callee_saved_dst);
    __ lgr_if_needed(dst_pos, callee_saved_dst_pos);
    __ lgr_if_needed(length, callee_saved_length);

    __ z_sr(length, tmp);
    __ z_ar(src_pos, tmp);
    __ z_ar(dst_pos, tmp);
    __ branch_optimized(Assembler::bcondAlways, *stub->entry());

    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

  int elem_size = type2aelembytes(basic_type);
  int shift_amount;

  switch (elem_size) {
    case 1:
      shift_amount = 0;
      break;
    case 2:
      shift_amount = 1;
      break;
    case 4:
      shift_amount = 2;
      break;
    case 8:
      shift_amount = 3;
      break;
    default:
      shift_amount = -1;
      ShouldNotReachHere();
  }

  Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
  Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
  Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
  Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());

  // Length and positions are all sign-extended at this point on 64 bit.

  // Test for NULL.
  if (flags & LIR_OpArrayCopy::src_null_check) {
    __ compareU64_and_branch(src, (intptr_t)0, Assembler::bcondZero, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_null_check) {
    __ compareU64_and_branch(dst, (intptr_t)0, Assembler::bcondZero, *stub->entry());
  }

  // Check if negative.
  if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
    __ compare32_and_branch(src_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
    __ compare32_and_branch(dst_pos, (intptr_t)0, Assembler::bcondLow, *stub->entry());
  }

  // If the compiler was not able to prove that the exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
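  // Klass::layout_helper() is a jint that is negative for array klasses and
  // positive for instance klasses, with _lh_neutral_value == 0 in between
  // (see the assert below). z_lt both loads the value and sets the condition
  // code, so bcondNotLow (value >= 0) means "not an array" and sends the
  // operation to the slow stub.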
  if (flags & LIR_OpArrayCopy::type_check) {
    assert(Klass::_lh_neutral_value == 0, "or replace z_lt instructions");

    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ branch_optimized(Assembler::bcondNotLow, *stub->entry());
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ z_lt(tmp, Address(tmp, in_bytes(Klass::layout_helper_offset())));
      __ branch_optimized(Assembler::bcondNotLow, *stub->entry());
    }
  }

  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ z_la(tmp, Address(src_pos, length));
    __ z_cl(tmp, src_length_addr);
    __ branch_optimized(Assembler::bcondHigh, *stub->entry());
  }
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ z_la(tmp, Address(dst_pos, length));
    __ z_cl(tmp, dst_length_addr);
    __ branch_optimized(Assembler::bcondHigh, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ z_ltr(length, length);
    __ branch_optimized(Assembler::bcondNegative, *stub->entry());
  }

  // Stubs require 64 bit values.
  __ z_lgfr(src_pos, src_pos); // int -> long
  __ z_lgfr(dst_pos, dst_pos); // int -> long
  __ z_lgfr(length, length);   // int -> long

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know whether the array types are compatible.
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays.
      if (UseCompressedClassPointers) {
        __ z_l(tmp, src_klass_addr);
        __ z_c(tmp, dst_klass_addr);
      } else {
        __ z_lg(tmp, src_klass_addr);
        __ z_cg(tmp, dst_klass_addr);
      }
      __ branch_optimized(Assembler::bcondNotEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      NearLabel cont, slow;
      Register src_klass = Z_R1_scratch;
      Register dst_klass = Z_R10;

      __ load_klass(src_klass, src);
      __ load_klass(dst_klass, dst);

      __ check_klass_subtype_fast_path(src_klass, dst_klass, tmp, &cont, &slow, NULL);

      store_parameter(src_klass, 0); // sub
      store_parameter(dst_klass, 1); // super
      emit_call_c(Runtime1::entry_for (Runtime1::slow_subtype_check_id));
      CHECK_BAILOUT();
      // Sets condition code 0 for match (2 otherwise).
      __ branch_optimized(Assembler::bcondEqual, cont);

      __ bind(slow);

      address copyfunc_addr = StubRoutines::checkcast_arraycopy();
      if (copyfunc_addr != NULL) { // use stub if available
        // Src is not a sub class of dst so we have to do a
        // per-element check.

        int mask = LIR_OpArrayCopy::src_objarray | LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          // One of the two arrays is known to be an object array; check at
          // runtime that the other one is an object array as well.
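          // The layout helper for T_OBJECT arrays is a compile-time constant
          // packing tag, header size, element type, and log2 element size,
          // so a single 32-bit compare against it suffices to decide whether
          // the unknown operand is an object array.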
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }
          Address klass_lh_addr(tmp, Klass::layout_helper_offset());
          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ load_const_optimized(Z_R1_scratch, objArray_lh);
          __ z_c(Z_R1_scratch, klass_lh_addr);
          __ branch_optimized(Assembler::bcondNotEqual, *stub->entry());
        }

        // Save outgoing arguments in callee saved registers (C convention) in case
        // a call to System.arraycopy is needed.
        Register callee_saved_src = Z_R10;
        Register callee_saved_src_pos = Z_R11;
        Register callee_saved_dst = Z_R12;
        Register callee_saved_dst_pos = Z_R13;
        Register callee_saved_length = Z_ARG5; // Z_ARG5 == Z_R6 is callee saved.

        __ lgr_if_needed(callee_saved_src, src);
        __ lgr_if_needed(callee_saved_src_pos, src_pos);
        __ lgr_if_needed(callee_saved_dst, dst);
        __ lgr_if_needed(callee_saved_dst_pos, dst_pos);
        __ lgr_if_needed(callee_saved_length, length);

        __ z_llgfr(length, length); // Higher 32 bits must be zero.

        __ z_sllg(Z_ARG1, src_pos, shift_amount); // index -> byte offset
        __ z_sllg(Z_ARG2, dst_pos, shift_amount); // index -> byte offset

        __ z_la(Z_ARG1, Address(src, Z_ARG1, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(Z_ARG1, dst, dst_pos, length);
        __ z_la(Z_ARG2, Address(dst, Z_ARG2, arrayOopDesc::base_offset_in_bytes(basic_type)));
        assert_different_registers(Z_ARG2, dst, length);

        __ z_lgr(Z_ARG3, length);
        assert_different_registers(Z_ARG3, dst);

        __ load_klass(Z_ARG5, dst);
        __ z_lg(Z_ARG5, Address(Z_ARG5, ObjArrayKlass::element_klass_offset()));
        __ z_lg(Z_ARG4, Address(Z_ARG5, Klass::super_check_offset_offset()));
        emit_call_c(copyfunc_addr);
        CHECK_BAILOUT();

#ifndef PRODUCT
        if (PrintC1Statistics) {
          NearLabel failed;
          __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondNotEqual, failed);
          __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_cnt);
          __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
          __ bind(failed);
        }
#endif

        __ compareU32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_arraycopy_checkcast_attempt_cnt);
          __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
        }
#endif

        __ z_lgr(tmp, Z_RET);
        __ z_xilf(tmp, -1);

        // Restore previously spilled arguments.
        __ lgr_if_needed(src, callee_saved_src);
        __ lgr_if_needed(src_pos, callee_saved_src_pos);
        __ lgr_if_needed(dst, callee_saved_dst);
        __ lgr_if_needed(dst_pos, callee_saved_dst_pos);
        __ lgr_if_needed(length, callee_saved_length);

        __ z_sr(length, tmp);
        __ z_ar(src_pos, tmp);
        __ z_ar(dst_pos, tmp);
      }

      __ branch_optimized(Assembler::bcondAlways, *stub->entry());

      __ bind(cont);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    NearLabel known_ok, halt;
    metadata2reg(default_type->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp);
    }

    if (basic_type != T_OBJECT) {
      if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); }
      else                            { __ z_cg(tmp, dst_klass_addr); }
      __ branch_optimized(Assembler::bcondNotEqual, halt);
      if (UseCompressedClassPointers) { __ z_c (tmp, src_klass_addr); }
      else                            { __ z_cg(tmp, src_klass_addr); }
      __ branch_optimized(Assembler::bcondEqual, known_ok);
    } else {
      if (UseCompressedClassPointers) { __ z_c (tmp, dst_klass_addr); }
      else                            { __ z_cg(tmp, dst_klass_addr); }
      __ branch_optimized(Assembler::bcondEqual, known_ok);
      __ compareU64_and_branch(src, dst, Assembler::bcondEqual, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ load_const_optimized(Z_R1_scratch, Runtime1::arraycopy_count_address(basic_type));
    __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
  }
#endif

  __ z_sllg(tmp, src_pos, shift_amount);          // index -> byte offset
  __ z_sllg(Z_R1_scratch, dst_pos, shift_amount); // index -> byte offset

  assert_different_registers(Z_ARG1, dst, dst_pos, length);
  __ z_la(Z_ARG1, Address(src, tmp, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(Z_ARG2, length);
  __ z_la(Z_ARG2, Address(dst, Z_R1_scratch, arrayOopDesc::base_offset_in_bytes(basic_type)));
  __ lgr_if_needed(Z_ARG3, length);

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
  __ call_VM_leaf(entry);

  __ bind(*stub->continuation());
}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ z_sllg(dest->as_register(), left->as_register(), 0, count->as_register()); break;
        case lir_shr:  __ z_srag(dest->as_register(), left->as_register(), 0, count->as_register()); break;
        case lir_ushr: __ z_srlg(dest->as_register(), left->as_register(), 0, count->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts");
      Register masked_count = Z_R1_scratch;
      __ z_lr(masked_count, count->as_register());
      __ z_nill(masked_count, 31);
      switch (code) {
        case lir_shl:  __ z_sllg(dest->as_register(), left->as_register(), 0, masked_count); break;
        case lir_shr:  __ z_sra(dest->as_register(), 0, masked_count); break;
        case lir_ushr: __ z_srl(dest->as_register(), 0, masked_count); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    switch (code) {
      case lir_shl:  __ z_sllg(dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
      case lir_shr:  __ z_srag(dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
      case lir_ushr: __ z_srlg(dest->as_register_lo(), left->as_register_lo(), 0, count->as_register()); break;
      default: ShouldNotReachHere();
    }
  }
}

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
  if (left->type() == T_OBJECT) {
    count = count & 63; // Shouldn't shift by more than the bit width of intptr_t.
    Register l = left->as_register();
    Register d = dest->as_register_lo();
    switch (code) {
      case lir_shl:  __ z_sllg(d, l, count); break;
      case lir_shr:  __ z_srag(d, l, count); break;
      case lir_ushr: __ z_srlg(d, l, count); break;
      default: ShouldNotReachHere();
    }
    return;
  }
  if (dest->is_single_cpu()) {
    assert(code == lir_shl || left == dest, "left and dest must be equal for 2 operand form right shifts");
    count = count & 0x1F; // Java spec
    switch (code) {
      case lir_shl:  __ z_sllg(dest->as_register(), left->as_register(), count); break;
      case lir_shr:  __ z_sra(dest->as_register(), count); break;
      case lir_ushr: __ z_srl(dest->as_register(), count); break;
      default: ShouldNotReachHere();
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    Register l = left->as_pointer_register();
    Register d = dest->as_pointer_register();
    switch (code) {
      case lir_shl:  __ z_sllg(d, l, count); break;
      case lir_shr:  __ z_srag(d, l, count); break;
      case lir_ushr: __ z_srlg(d, l, count); break;
      default: ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    // Make sure klass is initialized & doesn't have finalizer.
    const int state_offset = in_bytes(InstanceKlass::init_state_offset());
    Register iklass = op->klass()->as_register();
    add_debug_info_for_null_check_here(op->stub()->info());
    if (Immediate::is_uimm12(state_offset)) {
      __ z_cli(state_offset, iklass, InstanceKlass::fully_initialized);
    } else {
      __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
    }
    __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far.
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());
  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}

void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  Register len = op->len()->as_register();
  __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend

  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ z_brul(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ z_cg(recv, receiver_addr);
    __ z_brne(next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
    __ branch_optimized(Assembler::bcondAlways, *update_done);
    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in.
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
    __ z_ltg(Z_R0_scratch, recv_addr);
    __ z_brne(next_test);
    __ z_stg(recv, recv_addr);
    __ load_const_optimized(tmp1, DataLayout::counter_increment);
    __ z_stg(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)), mdo);
    __ branch_optimized(Assembler::bcondAlways, *update_done);
    __ bind(next_test);
  }
}

void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  Unimplemented();
}

void LIR_Assembler::store_parameter(Register r, int param_num) {
  assert(param_num >= 0, "invalid num");
  int offset_in_bytes = param_num * BytesPerWord + FrameMap::first_available_sp_in_frame;
  assert(offset_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ z_stg(r, offset_in_bytes, Z_SP);
}

void LIR_Assembler::store_parameter(jint c, int param_num) {
  assert(param_num >= 0, "invalid num");
  int offset_in_bytes = param_num * BytesPerWord + FrameMap::first_available_sp_in_frame;
  assert(offset_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ store_const(Address(Z_SP, offset_in_bytes), c, Z_R1_scratch, true);
}

void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  // We always need a stub for the failure case.
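  // Shape of the code below: the object is null-checked first (recording
  // null_seen in the MDO when profiling), then the inline fast subtype
  // check is attempted; only when that cannot decide is
  // Runtime1::slow_subtype_check_id called. With profiling enabled,
  // success and failure are routed through MDO-update blocks before
  // reaching the caller-supplied labels.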
  CodeStub* stub = op->stub();
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register dst = op->result_opr()->as_register();
  Register Rtmp1 = Z_R1_scratch;
  ciKlass* k = op->klass();

  assert(!op->tmp3()->is_valid(), "tmp3's not needed");

  // Check if it needs to be profiled.
  ciMethodData* md = NULL;
  ciProfileData* data = NULL;

  if (op->should_profile()) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    int bci = op->profiled_bci();
    md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    data = md->bci_to_data(bci);
    assert(data != NULL, "need data for type check");
    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  }

  // A temp may be assigned the same register as an input whose live range
  // ends at this instruction (end of range is exclusive), so a register
  // conflict with the inputs is possible and is resolved here.
  if (obj == k_RInfo) {
    k_RInfo = dst;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
  }
  assert_different_registers(obj, k_RInfo, klass_RInfo);

  if (op->should_profile()) {
    NearLabel not_null;
    __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondNotEqual, not_null);
    // Object is null; update MDO and exit.
    Register mdo = klass_RInfo;
    metadata2reg(md->constant_encoding(), mdo);
    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
    __ or2mem_8(data_addr, header_bits);
    __ branch_optimized(Assembler::bcondAlways, *obj_is_null);
    __ bind(not_null);
  } else {
    __ compareU64_and_branch(obj, (intptr_t) 0, Assembler::bcondEqual, *obj_is_null);
  }

  NearLabel profile_cast_failure, profile_cast_success;
  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
  Label *success_target = op->should_profile() ? &profile_cast_success : success;

  // Patching may screw with our temporaries,
  // so let's do it before loading the class.
  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }
  assert(obj != k_RInfo, "must be different");

  __ verify_oop(obj);

  // Get object class.
  // Not a safepoint as obj null check happens earlier.
  if (op->fast_check()) {
    if (UseCompressedClassPointers) {
      __ load_klass(klass_RInfo, obj);
      __ compareU64_and_branch(k_RInfo, klass_RInfo, Assembler::bcondNotEqual, *failure_target);
    } else {
      __ z_cg(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
      __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
    }
    // Successful cast, fall through to profile or jump.
  } else {
    bool need_slow_path = !k->is_loaded() ||
                          ((int) k->super_check_offset() == in_bytes(Klass::secondary_super_cache_offset()));
    intptr_t super_check_offset = k->is_loaded() ? k->super_check_offset() : -1L;
    __ load_klass(klass_RInfo, obj);
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1,
                                     (need_slow_path ? success_target : NULL),
                                     failure_target, NULL,
                                     RegisterOrConstant(super_check_offset));
    if (need_slow_path) {
      // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
      address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
      store_parameter(klass_RInfo, 0); // sub
      store_parameter(k_RInfo, 1);     // super
      emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
      CHECK_BAILOUT();
      __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
      // Fall through to success case.
    }
  }

  if (op->should_profile()) {
    Register mdo = klass_RInfo, recv = k_RInfo;
    assert_different_registers(obj, mdo, recv);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    __ load_klass(recv, obj);
    type_profile_helper(mdo, md, data, recv, Rtmp1, success);
    __ branch_optimized(Assembler::bcondAlways, *success);

    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    __ add2mem_64(Address(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())), -(int)DataLayout::counter_increment, Rtmp1);
    __ branch_optimized(Assembler::bcondAlways, *failure);
  } else {
    __ branch_optimized(Assembler::bcondAlways, *success);
  }
}

void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = Z_R1_scratch;

    CodeStub* stub = op->stub();

    // Check if it needs to be profiled.
    ciMethodData* md = NULL;
    ciProfileData* data = NULL;

    assert_different_registers(value, k_RInfo, klass_RInfo);

    if (op->should_profile()) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      int bci = op->profiled_bci();
      md = method->method_data_or_null();
      assert(md != NULL, "Sanity");
      data = md->bci_to_data(bci);
      assert(data != NULL, "need data for type check");
      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    }
    NearLabel profile_cast_success, profile_cast_failure, done;
    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();

    if (op->should_profile()) {
      NearLabel not_null;
      __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondNotEqual, not_null);
      // Object is null; update MDO and exit.
      Register mdo = klass_RInfo;
      metadata2reg(md->constant_encoding(), mdo);
      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
      __ or2mem_8(data_addr, header_bits);
      __ branch_optimized(Assembler::bcondAlways, done);
      __ bind(not_null);
    } else {
      __ compareU64_and_branch(value, (intptr_t) 0, Assembler::bcondEqual, done);
    }

    add_debug_info_for_null_check_here(op->info_for_exception());
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // Get instance klass (it's already uncompressed).
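    // For the array store check, the destination array's element klass
    // (loaded just below) is the "super" side of the subtype check: the
    // stored value's klass must be a subtype of it.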
    __ z_lg(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
    // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
    address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
    store_parameter(klass_RInfo, 0); // sub
    store_parameter(k_RInfo, 1);     // super
    emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
    CHECK_BAILOUT();
    __ branch_optimized(Assembler::bcondNotEqual, *failure_target);
    // Fall through to success case.

    if (op->should_profile()) {
      Register mdo = klass_RInfo, recv = k_RInfo;
      assert_different_registers(value, mdo, recv);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      __ load_klass(recv, value);
      type_profile_helper(mdo, md, data, recv, Rtmp1, &done);
      __ branch_optimized(Assembler::bcondAlways, done);

      __ bind(profile_cast_failure);
      metadata2reg(md->constant_encoding(), mdo);
      __ add2mem_64(Address(mdo, md->byte_offset_of_slot(data, CounterData::count_offset())), -(int)DataLayout::counter_increment, Rtmp1);
      __ branch_optimized(Assembler::bcondAlways, *stub->entry());
    }

    __ bind(done);
  } else {
    if (code == lir_checkcast) {
      Register obj = op->object()->as_register();
      Register dst = op->result_opr()->as_register();
      NearLabel success;
      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
      __ bind(success);
      __ lgr_if_needed(dst, obj);
    } else {
      if (code == lir_instanceof) {
        Register obj = op->object()->as_register();
        Register dst = op->result_opr()->as_register();
        NearLabel success, failure, done;
        emit_typecheck_helper(op, &success, &failure, &failure);
        __ bind(failure);
        __ clear_reg(dst);
        __ branch_optimized(Assembler::bcondAlways, done);
        __ bind(success);
        __ load_const_optimized(dst, 1);
        __ bind(done);
      } else {
        ShouldNotReachHere();
      }
    }
  }
}

void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr = op->addr()->as_pointer_register();
  Register t1_cmp = Z_R1_scratch;
  if (op->code() == lir_cas_long) {
    assert(VM_Version::supports_cx8(), "wrong machine");
    Register cmp_value_lo = op->cmp_value()->as_register_lo();
    Register new_value_lo = op->new_value()->as_register_lo();
    __ z_lgr(t1_cmp, cmp_value_lo);
    // Perform the compare and swap operation.
    __ z_csg(t1_cmp, new_value_lo, 0, addr);
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    Register cmp_value = op->cmp_value()->as_register();
    Register new_value = op->new_value()->as_register();
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        t1_cmp = op->tmp1()->as_register();
        Register t2_new = op->tmp2()->as_register();
        assert_different_registers(cmp_value, new_value, addr, t1_cmp, t2_new);
        __ oop_encoder(t1_cmp, cmp_value, true /*maybe null*/);
        __ oop_encoder(t2_new, new_value, true /*maybe null*/);
        __ z_cs(t1_cmp, t2_new, 0, addr);
      } else {
        __ z_lgr(t1_cmp, cmp_value);
        __ z_csg(t1_cmp, new_value, 0, addr);
      }
    } else {
      __ z_lr(t1_cmp, cmp_value);
      __ z_cs(t1_cmp, new_value, 0, addr);
    }
  } else {
    ShouldNotReachHere(); // new lir_cas_??
  }
}

void LIR_Assembler::set_24bit_FPU() {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::reset_FPU() {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::breakpoint() {
  Unimplemented();
  // __ breakpoint_trap();
}

void LIR_Assembler::push(LIR_Opr opr) {
  ShouldNotCallThis(); // unused
}

void LIR_Assembler::pop(LIR_Opr opr) {
  ShouldNotCallThis(); // unused
}

void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address addr = frame_map()->address_for_monitor_lock(monitor_no);
  __ add2reg(dst_opr->as_register(), addr.disp(), addr.base());
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register(); // May not be an oop.
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ branch_optimized(Assembler::bcondAlways, *op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // Add debug info for NullPointerException only if one is possible.
    if (op->info() != NULL) {
      add_debug_info_for_null_check_here(op->info());
    }
    __ lock_object(hdr, obj, lock, *op->stub()->entry());
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    ShouldNotReachHere();
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types.
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
  metadata2reg(md->constant_encoding(), mdo);

  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes.
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations.
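      // A VirtualCallData row is a (receiver klass, count) pair, and there
      // are VirtualCallData::row_limit() rows. The first loop below bumps
      // the counter if the known receiver klass already owns a row; the
      // second claims the first empty row for it.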
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
          return;
        }
      }

      // Receiver type not found in profile data. Select an empty slot.

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ z_stg(tmp1, recv_addr);
          Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
          __ add2mem_64(data_addr, DataLayout::counter_increment, tmp1);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      NearLabel update_done;
      type_profile_helper(mdo, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1);
      __ bind(update_done);
    }
  } else {
    // static call
    __ add2mem_64(counter_addr, DataLayout::counter_increment, tmp1);
  }
}

void LIR_Assembler::align_backward_branch_target() {
  __ align(OptoLoopAlignment);
}

void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  ShouldNotCallThis(); // There are no delay slots on ZARCH_64.
}

void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ z_lcr(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ z_lcebr(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ z_lcdbr(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    __ z_lcgr(dest->as_register_lo(), left->as_register_lo());
  }
}

void LIR_Assembler::fxch(int i) {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::fld(int i) {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::ffree(int i) {
  ShouldNotCallThis(); // x86 only
}

void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  assert(!tmp->is_valid(), "don't need temporary");
  emit_call_c(dest);
  CHECK_BAILOUT();
  if (info != NULL) {
    add_call_info_here(info);
  }
}

void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotCallThis(); // not needed on ZARCH_64
}

void LIR_Assembler::membar() {
  __ z_fence();
}

void LIR_Assembler::membar_acquire() {
  __ z_acquire();
}

void LIR_Assembler::membar_release() {
  __ z_release();
}

void LIR_Assembler::membar_loadload() {
  __ z_acquire();
}

void LIR_Assembler::membar_storestore() {
  __ z_release();
}

void LIR_Assembler::membar_loadstore() {
  __ z_acquire();
}

void LIR_Assembler::membar_storeload() {
  __ z_fence();
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "scaling unsupported");
  __ load_address(dest->as_pointer_register(), as_Address(addr));
}

void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotCallThis(); // unused
}

#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif

void LIR_Assembler::peephole(LIR_List*) {
  // Do nothing for now.
}

void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  assert(code == lir_xadd, "lir_xchg not supported");
  Address src_addr = as_Address(src->as_address_ptr());
  Register base = src_addr.base();
  intptr_t disp = src_addr.disp();
  if (src_addr.index()->is_valid()) {
    // LAA and LAAG do not support index register.
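    // So fold base + index + displacement into Z_R1_scratch and address the
    // operand with displacement 0 instead. LAA/LAAG perform an interlocked
    // load-and-add: the first operand receives the old memory contents while
    // memory is updated to old value + addend, which is exactly the
    // fetch-and-add semantics lir_xadd needs.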
    __ load_address(Z_R1_scratch, src_addr);
    base = Z_R1_scratch;
    disp = 0;
  }
  if (data->type() == T_INT) {
    __ z_laa(dest->as_register(), data->as_register(), disp, base);
  } else if (data->type() == T_LONG) {
    assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
    __ z_laag(dest->as_register_lo(), data->as_register_lo(), disp, base);
  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp1 = op->tmp()->as_pointer_register();
  Register tmp2 = Z_R1_scratch;
  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label update, next, none, null_seen, init_klass;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (do_null || tmp1 != obj DEBUG_ONLY(|| true)) {
    __ z_ltgr(tmp1, obj);
  }
  if (do_null) {
    __ z_brnz(update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ z_lg(tmp1, mdo_addr);
      __ z_oill(tmp1, TypeEntries::null_seen);
      __ z_stg(tmp1, mdo_addr);
    }
    if (do_update) {
      __ z_bru(next);
    }
  } else {
    __ asm_assert_ne("unexpected null obj", __LINE__);
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      __ load_klass(tmp1, tmp1);
      metadata2reg(exact_klass->constant_encoding(), tmp2);
      __ z_cgr(tmp1, tmp2);
      __ asm_assert_eq("exact klass and actual klass differ", __LINE__);
    }
#endif

    Label do_update;
    __ z_lg(tmp2, mdo_addr);

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        if (exact_klass != NULL) {
          metadata2reg(exact_klass->constant_encoding(), tmp1);
        } else {
          __ load_klass(tmp1, tmp1);
        }

        // Klass seen before: nothing to do (regardless of unknown bit).
        __ z_lgr(Z_R0_scratch, tmp2);
        assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
        __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF);
        __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next);

        // Already unknown: Nothing to do anymore.
        __ z_tmll(tmp2, TypeEntries::type_unknown);
        __ z_brc(Assembler::bcondAllOne, next);

        if (TypeEntries::is_type_none(current_klass)) {
          __ z_lgr(Z_R0_scratch, tmp2);
          assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
          __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF);
          __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, init_klass);
        }
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        // Already unknown: Nothing to do anymore.
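        // (z_tmll tests the low 16 register bits under the given mask;
        // bcondAllOne is taken when every masked bit is already set, i.e.
        // the type_unknown bit is recorded and nothing needs updating.)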
        __ z_tmll(tmp2, TypeEntries::type_unknown);
        __ z_brc(Assembler::bcondAllOne, next);
      }

      // Different than before. Cannot keep accurate profile.
      __ z_oill(tmp2, TypeEntries::type_unknown);
      __ z_bru(do_update);
    } else {
      // There's a single possible klass at this profile point.
      assert(exact_klass != NULL, "should be");
      if (TypeEntries::is_type_none(current_klass)) {
        metadata2reg(exact_klass->constant_encoding(), tmp1);
        __ z_lgr(Z_R0_scratch, tmp2);
        assert(Immediate::is_uimm(~TypeEntries::type_klass_mask, 16), "or change following instruction");
        __ z_nill(Z_R0_scratch, TypeEntries::type_klass_mask & 0xFFFF);
        __ compareU64_and_branch(Z_R0_scratch, tmp1, Assembler::bcondEqual, next);
#ifdef ASSERT
        {
          Label ok;
          __ z_lgr(Z_R0_scratch, tmp2);
          assert(Immediate::is_uimm(~TypeEntries::type_mask, 16), "or change following instruction");
          __ z_nill(Z_R0_scratch, TypeEntries::type_mask & 0xFFFF);
          __ compareU64_and_branch(Z_R0_scratch, (intptr_t)0, Assembler::bcondEqual, ok);
          __ stop("unexpected profiling mismatch");
          __ bind(ok);
        }
#endif

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown: Nothing to do anymore.
        __ z_tmll(tmp2, TypeEntries::type_unknown);
        __ z_brc(Assembler::bcondAllOne, next);
        __ z_oill(tmp2, TypeEntries::type_unknown);
        __ z_bru(do_update);
      }
    }

    __ bind(init_klass);
    // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
    __ z_ogr(tmp2, tmp1);

    __ bind(do_update);
    __ z_stg(tmp2, mdo_addr);

    __ bind(next);
  }
}

void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ load_const_optimized(res, StubRoutines::crc_table_addr());
  __ kernel_crc32_singleByteReg(crc, val, res, true);
  __ z_lgfr(res, crc);
}

#undef __