/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  Unimplemented(); return false; // Currently not used on this platform.
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::R3_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::R3_opr;
}


// This specifies the stack pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //  1. Create a new compiled activation.
  //  2. Initialize local variables in the compiled activation. The expression
  //     stack must be empty at the osr_bci; it is not initialized.
  //  3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();
  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  //__ load_const_optimized(R0, entry_point);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
  __ mtctr(R0);
  __ bctr();

  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");

  int offset = code_offset();
  bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
  const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;

  // Fetch the exception from TLS and clear out exception related thread state.
  __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ li(R0, 0);
  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception);
  if (preserve_exception) { __ mr(Rexception_save, Rexception); }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R4_opr);
    stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
    __ unlock_object(R5, R6, R4, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Unimplemented();
  }

  // Dispatch to the unwind logic.
  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
  //__ load_const_optimized(R0, unwind_stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
  if (preserve_exception) { __ mr(Rexception, Rexception_save); }
  __ mtctr(R0);
  __ bctr();

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ li(reg, 0);
  } else {
    AddressLiteral addrlit = __ constant_oop_address(o);
    __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation)
  __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  const bool is_int = result->is_single_cpu();
  Register Rdividend = is_int ? left->as_register() : left->as_register_lo();
  Register Rdivisor  = noreg;
  Register Rscratch  = temp->as_register();
  Register Rresult   = is_int ? result->as_register() : result->as_register_lo();
  long divisor = -1;

  if (right->is_register()) {
    Rdivisor = is_int ? right->as_register() : right->as_register_lo();
  } else {
    divisor = is_int ? right->as_constant_ptr()->as_jint()
                     : right->as_constant_ptr()->as_jlong();
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(code == lir_idiv || code == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg) {
    if (divisor == 1) { // stupid, but can happen
      if (code == lir_idiv) {
        __ mr_if_needed(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else if (is_power_of_2(divisor)) {
      // Convert division by a power of two into some shifts and logical operations.
      int log2 = log2_intptr(divisor);

      // Round towards 0.
      if (divisor == 2) {
        if (is_int) {
          __ srwi(Rscratch, Rdividend, 31);
        } else {
          __ srdi(Rscratch, Rdividend, 63);
        }
      } else {
        if (is_int) {
          __ srawi(Rscratch, Rdividend, 31);
        } else {
          __ sradi(Rscratch, Rdividend, 63);
        }
        __ clrldi(Rscratch, Rscratch, 64-log2);
      }
      __ add(Rscratch, Rdividend, Rscratch);

      if (code == lir_idiv) {
        if (is_int) {
          __ srawi(Rresult, Rscratch, log2);
        } else {
          __ sradi(Rresult, Rscratch, log2);
        }
      } else { // lir_irem
        __ clrrdi(Rscratch, Rscratch, log2);
        __ sub(Rresult, Rdividend, Rscratch);
      }
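
      // Example (is_int, divisor == 8, log2 == 3): for Rdividend == -13, srawi
      // yields -1 and clrldi keeps its low 3 bits (7); add gives -6. For idiv,
      // srawi by 3 then yields -1 == trunc(-13/8); for irem, clrrdi yields -8
      // and sub yields -13 - (-8) == -5 == -13 rem 8.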

    } else if (divisor == -1) {
      if (code == lir_idiv) {
        __ neg(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else {
      __ load_const_optimized(Rscratch, divisor);
      if (code == lir_idiv) {
        if (is_int) {
          __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        } else {
          __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        }
      } else {
        assert(Rscratch != R0, "need both");
        if (is_int) {
          __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mullw(Rscratch, R0, Rscratch);
        } else {
          __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mulld(Rscratch, R0, Rscratch);
        }
        __ sub(Rresult, Rdividend, Rscratch);
      }

    }
    return;
  }

  Label regular, done;
  if (is_int) {
    __ cmpwi(CCR0, Rdivisor, -1);
  } else {
    __ cmpdi(CCR0, Rdivisor, -1);
  }
  __ bne(CCR0, regular);
  if (code == lir_idiv) {
    __ neg(Rresult, Rdividend);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    } else {
      __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    }
  } else { // lir_irem
    __ li(Rresult, 0);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mullw(Rscratch, Rscratch, Rdivisor);
    } else {
      __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mulld(Rscratch, Rscratch, Rdivisor);
    }
    __ sub(Rresult, Rdividend, Rscratch);
  }
  __ bind(done);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
                      op->result_opr(), op->info());
      break;
    case lir_fmad:
      __ fmadd(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg());
      break;
    case lir_fmaf:
      __ fmadds(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(),
                op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
#endif

  Label *L = op->label();
  if (op->cond() == lir_cond_always) {
    __ b(*L);
  } else {
    Label done;
    bool is_unordered = false;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      is_unordered = true;
    } else {
      assert(op->code() == lir_branch, "just checking");
    }

    bool positive = false;
    Assembler::Condition cond = Assembler::equal;
    switch (op->cond()) {
      case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
      case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
      case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
      case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
      default:                    ShouldNotReachHere();
    }
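    // bo encodes "branch if CR bit set" vs. "branch if CR bit clear";
    // bi selects the bit of BOOL_RESULT (CCR5) that holds the compare result.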
    int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
    int bi = Assembler::bi0(BOOL_RESULT, cond);
    if (is_unordered) {
      if (positive) {
        if (op->ublock() == op->block()) {
          __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
        }
      } else {
        if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
      }
    }
    __ bc_far_optimized(bo, bi, *L);
    __ bind(done);
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr src = op->in_opr(),
          dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_double_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2d) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      __ fcfid(rdst, rsrc);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_l2f: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2f) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      if (VM_Version::has_fcfids()) {
        __ fcfids(rdst, rsrc);
      } else {
        assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
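      // (fctiwz/fctidz do not return 0 for NaN, hence the explicit check.)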
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register(), 0);
      }
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register(), rsrc);
      }
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register_lo(), 0);
      }
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register_lo(), rsrc);
      }
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on ppc
}


bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be related to the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(rtype == relocInfo::opt_virtual_call_type || rtype == relocInfo::static_call_type, "unexpected rtype");

  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ calculate_address_from_global_toc(R2_TOC, __ method_toc());

  // Virtual call relocation will point to ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
  // Call to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere(); // ic_call is used instead.
}


void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
  __ null_check(addr, stub->entry());
  append_code_stub(stub);
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we setup the offset.
    assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
    __ load_const_optimized(R0, offset);
    store_offset = store(from_reg, base, R0, type, wide);
  } else {
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
      case T_CHAR  :
      case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
      case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
      case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
      case T_ADDRESS:
      case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            // Encoding done in caller
            __ stw(from_reg->as_register(), offset, base);
          } else {
            __ std(from_reg->as_register(), offset, base);
          }
          __ verify_oop(from_reg->as_register());
          break;
        }
      case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
    case T_CHAR  :
    case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stdx(from_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    case T_ADDRESS:
      __ stdx(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          // Encoding done in caller.
          __ stwx(from_reg->as_register(), base, disp);
        } else {
          __ stdx(from_reg->as_register(), base, disp);
        }
        __ verify_oop(from_reg->as_register()); // kills R0
        break;
      }
    case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we setup the offset.
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ lbz(to_reg->as_register(), offset, base);
                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  : __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT : __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   : __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  : __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lwz(to_reg->as_register(), offset, base);
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld(to_reg->as_register(), offset, base);
          }
          __ verify_oop(to_reg->as_register());
          break;
        }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ lbzx(to_reg->as_register(), base, disp);
                   __ extsb(to_reg->as_register(), to_reg->as_register()); break;
    case T_CHAR  : __ lhzx(to_reg->as_register(), base, disp); break;
    case T_SHORT : __ lhax(to_reg->as_register(), base, disp); break;
    case T_INT   : __ lwax(to_reg->as_register(), base, disp); break;
    case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lwzx(to_reg->as_register(), base, disp);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ldx(to_reg->as_register(), base, disp);
        }
        __ verify_oop(to_reg->as_register());
        break;
      }
    case T_FLOAT:  __ lfsx(to_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(to_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    default      : ShouldNotReachHere();
  }
  return load_offset;
}

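
// Load a constant into R0 and store it to the given stack slot.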
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  Register src_reg = R0;
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_ADDRESS: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_OBJECT: {
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      long value = c->as_jlong_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  int offset = -1;
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(base, info);
  }

  switch (c->type()) {
    case T_FLOAT: type = T_INT; // fall through
    case T_INT:
    case T_ADDRESS: {
      tmp = FrameMap::R0_opr;
      __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
      break;
    }
    case T_DOUBLE: type = T_LONG; // fall through
    case T_LONG: {
      tmp = FrameMap::R0_long_opr;
      __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
      break;
    }
    case T_OBJECT: {
      tmp = FrameMap::R0_opr;
      if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
        AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
        __ lis(R0, oop_addr.value() >> 16); // Don't care about sign extend (will use stw).
        __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
        __ ori(R0, R0, oop_addr.value() & 0xffff);
      } else {
        jobject2reg(c->as_jobject(), R0);
      }
      break;
    }
    default:
      Unimplemented();
  }

  // Handle either reg+reg or reg+disp address.
  if (addr->index()->is_valid()) {
    assert(addr->disp() == 0, "must be zero");
    offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
  } else {
    assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
    offset = store(tmp, base, addr->disp(), type, wide, false);
  }

  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    if (!needs_explicit_null_check) {
      add_debug_info_for_null_check(offset, info);
    }
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
      break;
    }
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0); // Yes, as_jint ...
      break;
    }
    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), to_reg->as_register());
      } else {
        jobject2reg_with_patching(to_reg->as_register(), info);
      }
      break;
    }

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        if (to_reg->is_single_fpu()) {
          address const_addr = __ float_constant(c->as_jfloat());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfsx(to_reg->as_float_reg(), R0);
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
          __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
        }
      }
      break;

    case T_DOUBLE:
      {
        if (to_reg->is_double_fpu()) {
          address const_addr = __ double_constant(c->as_jdouble());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfdx(to_reg->as_double_reg(), R0);
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Unimplemented(); return Address();
}


inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
  if (addr->index()->is_illegal()) {
    return (RegisterOrConstant)(addr->disp());
  } else {
    return (RegisterOrConstant)(addr->index()->as_pointer_register());
  }
}

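
// Copy between stack slots via R0: 32-bit copies for int/float,
// 64-bit copies for address/object/long/double slots.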
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const Register tmp = R0;
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lwz(tmp, from.disp(), from.base());
      __ stw(tmp, to.disp(), to.base());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Unimplemented(); return Address();
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Unimplemented(); return Address();
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  // null check for large offsets in LIRGenerator::do_LoadField
  bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm16(disp_value)) {
      if (needs_patching) {
        __ load_const32(R0, 0); // patchable int
      } else {
        __ load_const_optimized(R0, disp_value);
      }
      disp_reg = R0;
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (type == T_ARRAY || type == T_OBJECT) && UseCompressedOops && !wide &&
                      Universe::narrow_oop_mode() != Universe::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (load_disp) {
      disp_reg = use_R29 ? R29_TOC : R0;
      if (needs_patching) {
        __ load_const32(disp_reg, 0); // patchable int
      } else {
        __ load_const_optimized(disp_reg, disp_value);
      }
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (compress_oop) {
    Register co = __ encode_heap_oop(R0, from_reg->as_register());
    from_reg = FrameMap::as_opr(co);
  }

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (use_R29) {
    __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}

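
// Emit the return sequence: pop the frame, restore the return pc, and poll
// the safepoint page before returning.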
void LIR_Assembler::return_op(LIR_Opr result) {
  const Register return_pc = R31; // Must survive C-call to enable_stack_reserved_zone().
  const Register polling_page = R12;

  // Pop the stack before the safepoint code.
  int frame_size = initial_frame_size_in_bytes();
  if (Assembler::is_simm(frame_size, 16)) {
    __ addi(R1_SP, R1_SP, frame_size);
  } else {
    __ pop_frame();
  }

  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld(polling_page, in_bytes(Thread::polling_page_offset()), R16_thread);
  } else {
    __ load_const_optimized(polling_page, (long)(address) os::get_polling_page(), R0);
  }

  // Restore return pc relative to caller's sp.
  __ ld(return_pc, _abi(lr), R1_SP);
  // Move return pc to LR.
  __ mtlr(return_pc);

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(return_pc);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(polling_page);

  // Return.
  __ blr();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register();
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld(poll_addr, in_bytes(Thread::polling_page_offset()), R16_thread);
  } else {
    __ load_const_optimized(poll_addr, (intptr_t)os::get_polling_page(), R0);
  }
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(static_call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  const Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    __ mtctr(reg_scratch);
    __ bctr();
  }
  if (!success) {
    bailout("const section overflow");
    return;
  }

  assert(__ offset() - start <= static_call_stub_size(), "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
  if (opr1->is_single_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (unsigned_comp) {
              if (Assembler::is_uimm(con, 16)) {
                __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
              }
            } else {
              if (Assembler::is_simm(con, 16)) {
                __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
              }
            }
          }
          break;

        case T_OBJECT:
          // There are only equal/notequal comparisons on objects.
          {
            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
            } else {
              jobject2reg(con, R0);
              __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
        LIR_Address *addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
        else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
        __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
      } else {
        if (unsigned_comp) {
          __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        } else {
          __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        }
      }
    }
  } else if (opr1->is_double_cpu()) {
    if (opr2->is_constant()) {
      jlong con = opr2->as_constant_ptr()->as_jlong();
      if (unsigned_comp) {
        if (Assembler::is_uimm(con, 16)) {
          __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      } else {
        if (Assembler::is_simm(con, 16)) {
          __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      }
    } else if (opr2->is_register()) {
      if (unsigned_comp) {
        __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      } else {
        __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
    LIR_Address * addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
    else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
    __ cmpdi(BOOL_RESULT, R0, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register Rdst = dst->as_register();
  Label done;
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
    } else if (left->is_double_fpu()) {
      __ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
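    // Preload the unordered result; if the compare was unordered, the bso
    // below keeps it by branching over the CR-to-integer conversion.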
    __ li(Rdst, is_unordered_less ? -1 : 1);
    __ bso(CCR0, done);
  } else if (code == lir_cmp_l2i) {
    __ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
  } else {
    ShouldNotReachHere();
  }
  __ mfcr(R0); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rdst, R0, 30);
  __ srawi(R0, R0, 31);
  __ orr(Rdst, R0, Rdst); // set result as follows: <: -1, =: 0, >: 1
  __ bind(done);
}


inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
  if (src->is_constant()) {
    lasm->const2reg(src, dst, lir_patch_none, NULL);
  } else if (src->is_register()) {
    lasm->reg2reg(src, dst);
  } else if (src->is_stack()) {
    lasm->stack2reg(src, dst, dst->type());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
    load_to_reg(this, opr1, result); // Condition doesn't matter.
    return;
  }

  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (condition) {
    case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
    case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
    case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
    case lir_cond_belowEqual:
    case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
    case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
    case lir_cond_aboveEqual:
    case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
    default:                    ShouldNotReachHere();
  }

  // Try to use isel on >=Power7.
  if (VM_Version::has_isel() && result->is_cpu_register()) {
    bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
    const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();

    // We can use result_reg to load one operand if not already in register.
    Register first = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
             second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;

    if (first != second) {
      if (!o1_is_reg) {
        load_to_reg(this, opr1, result);
      }

      if (!o2_is_reg) {
        load_to_reg(this, opr2, result);
      }

      __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
      return;
    }
  } // isel

  load_to_reg(this, opr1, result);

  Label skip;
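  // result already holds opr1; the bc below skips the opr2 load when the condition holds.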
  int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(BOOL_RESULT, cond);
  __ bc(bo, bi, skip);

  load_to_reg(this, opr2, result);
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      if (right->is_single_fpu()) {
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
        switch (code) {
          case lir_add: __ fadds(res, lreg, rreg); break;
          case lir_sub: __ fsubs(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmuls(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdivs(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      } else {
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
        switch (code) {
          case lir_add: __ fadd(res, lreg, rreg); break;
          case lir_sub: __ fsub(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmul(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdiv(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      }

    } else if (dest->is_double_cpu()) {

      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
        case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
        case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert (right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add  (res, lreg, rreg); break;
        case lir_sub: __ sub  (res, lreg, rreg); break;
        case lir_mul: __ mullw(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert (right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int simm16 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
                      simm16 = -simm16;
        case lir_add: if (res == lreg && simm16 == 0) break;
                      __ addi(res, lreg, simm16); break;
        case lir_mul: if (res == lreg && simm16 == 1) break;
                      __ mulli(res, lreg, simm16); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm16(con), "must be simm16");

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                      con = -con;
        case lir_add: if (res == lreg && con == 0) break;
addi(res, lreg, (int)con); break; 1700 case lir_mul: if (res == lreg && con == 1) break; 1701 __ mulli(res, lreg, (int)con); break; 1702 default: ShouldNotReachHere(); 1703 } 1704 } 1705 } 1706 } 1707 1708 1709 void LIR_Assembler::fpop() { 1710 Unimplemented(); 1711 // do nothing 1712 } 1713 1714 1715 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1716 switch (code) { 1717 case lir_sqrt: { 1718 __ fsqrt(dest->as_double_reg(), value->as_double_reg()); 1719 break; 1720 } 1721 case lir_abs: { 1722 __ fabs(dest->as_double_reg(), value->as_double_reg()); 1723 break; 1724 } 1725 default: { 1726 ShouldNotReachHere(); 1727 break; 1728 } 1729 } 1730 } 1731 1732 1733 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1734 if (right->is_constant()) { // see do_LogicOp 1735 long uimm; 1736 Register d, l; 1737 if (dest->is_single_cpu()) { 1738 uimm = right->as_constant_ptr()->as_jint(); 1739 d = dest->as_register(); 1740 l = left->as_register(); 1741 } else { 1742 uimm = right->as_constant_ptr()->as_jlong(); 1743 d = dest->as_register_lo(); 1744 l = left->as_register_lo(); 1745 } 1746 long uimms = (unsigned long)uimm >> 16, 1747 uimmss = (unsigned long)uimm >> 32; 1748 1749 switch (code) { 1750 case lir_logic_and: 1751 if (uimmss != 0 || (uimms != 0 && (uimm & 0xFFFF) != 0) || is_power_of_2_long(uimm)) { 1752 __ andi(d, l, uimm); // special cases 1753 } else if (uimms != 0) { __ andis_(d, l, uimms); } 1754 else { __ andi_(d, l, uimm); } 1755 break; 1756 1757 case lir_logic_or: 1758 if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ oris(d, l, uimms); } 1759 else { __ ori(d, l, uimm); } 1760 break; 1761 1762 case lir_logic_xor: 1763 if (uimm == -1) { __ nand(d, l, l); } // special case 1764 else if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ xoris(d, l, uimms); } 1765 else { __ xori(d, l, uimm); } 1766 break; 1767 1768 default: ShouldNotReachHere(); 1769 } 1770 } else { 1771 assert(right->is_register(), "right should be in register"); 1772 1773 if (dest->is_single_cpu()) { 1774 switch (code) { 1775 case lir_logic_and: __ andr(dest->as_register(), left->as_register(), right->as_register()); break; 1776 case lir_logic_or: __ orr (dest->as_register(), left->as_register(), right->as_register()); break; 1777 case lir_logic_xor: __ xorr(dest->as_register(), left->as_register(), right->as_register()); break; 1778 default: ShouldNotReachHere(); 1779 } 1780 } else { 1781 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : 1782 left->as_register_lo(); 1783 Register r = (right->is_single_cpu() && right->is_oop_register()) ? 
right->as_register() :
1784                  right->as_register_lo();
1785
1786       switch (code) {
1787         case lir_logic_and: __ andr(dest->as_register_lo(), l, r); break;
1788         case lir_logic_or:  __ orr (dest->as_register_lo(), l, r); break;
1789         case lir_logic_xor: __ xorr(dest->as_register_lo(), l, r); break;
1790         default: ShouldNotReachHere();
1791       }
1792     }
1793   }
1794 }
1795
1796
1797 int LIR_Assembler::shift_amount(BasicType t) {
1798   int elem_size = type2aelembytes(t);
1799   switch (elem_size) {
1800     case 1 : return 0;
1801     case 2 : return 1;
1802     case 4 : return 2;
1803     case 8 : return 3;
1804   }
1805   ShouldNotReachHere();
1806   return -1;
1807 }
1808
1809
1810 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1811   info->add_register_oop(exceptionOop);
1812
1813   // Reuse the debug info from the safepoint poll for the throw op itself.
1814   address pc_for_athrow = __ pc();
1815   int pc_for_athrow_offset = __ offset();
1816   //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
1817   //__ relocate(rspec);
1818   //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0);
1819   __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
1820   add_call_info(pc_for_athrow_offset, info); // for exception handler
1821
1822   address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id
1823                                      : Runtime1::handle_exception_nofpu_id);
1824   //__ load_const_optimized(R0, stub);
1825   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
1826   __ mtctr(R0);
1827   __ bctr();
1828 }
1829
1830
1831 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
1832   // Note: Not used with EnableDebuggingOnDemand.
1833   assert(exceptionOop->as_register() == R3, "should match");
1834   __ b(_unwind_handler_entry);
1835 }
1836
1837
1838 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
1839   Register src = op->src()->as_register();
1840   Register dst = op->dst()->as_register();
1841   Register src_pos = op->src_pos()->as_register();
1842   Register dst_pos = op->dst_pos()->as_register();
1843   Register length  = op->length()->as_register();
1844   Register tmp = op->tmp()->as_register();
1845   Register tmp2 = R0;
1846
1847   int flags = op->flags();
1848   ciArrayKlass* default_type = op->expected_type();
1849   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
1850   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
1851
1852   // Set up the arraycopy stub information.
1853   ArrayCopyStub* stub = op->stub();
1854   const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame.
1855
1856   // Always take the stub path if no type information is available. It's OK if
1857   // the known type isn't loaded, since the code sanity-checks it in debug mode,
1858   // and the type isn't required when we know the exact type. Also check that
1859   // the type is an array type.
1860   if (op->expected_type() == NULL) {
1861     assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
1862            length->is_nonvolatile(), "must preserve");
1863     address copyfunc_addr = StubRoutines::generic_arraycopy();
1864     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
1865
1866     // 3 parms are int. Convert to long.
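    // The generic stub takes its five arguments in R3..R7 per the C calling
    // convention; extsw sign-extends the 32-bit int positions and length.
    // It returns 0 once everything has been copied, or ~(elements copied)
    // after a partial copy; the nand below recovers that count so that
    // src_pos, dst_pos and length can be advanced before taking the slow path.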
1867 __ mr(R3_ARG1, src); 1868 __ extsw(R4_ARG2, src_pos); 1869 __ mr(R5_ARG3, dst); 1870 __ extsw(R6_ARG4, dst_pos); 1871 __ extsw(R7_ARG5, length); 1872 1873 #ifndef PRODUCT 1874 if (PrintC1Statistics) { 1875 address counter = (address)&Runtime1::_generic_arraycopystub_cnt; 1876 int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true); 1877 __ lwz(R11_scratch1, simm16_offs, tmp); 1878 __ addi(R11_scratch1, R11_scratch1, 1); 1879 __ stw(R11_scratch1, simm16_offs, tmp); 1880 } 1881 #endif 1882 __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0); 1883 1884 __ nand(tmp, R3_RET, R3_RET); 1885 __ subf(length, tmp, length); 1886 __ add(src_pos, tmp, src_pos); 1887 __ add(dst_pos, tmp, dst_pos); 1888 1889 __ cmpwi(CCR0, R3_RET, 0); 1890 __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry()); 1891 __ bind(*stub->continuation()); 1892 return; 1893 } 1894 1895 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point"); 1896 Label cont, slow, copyfunc; 1897 1898 bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check | 1899 LIR_OpArrayCopy::dst_null_check | 1900 LIR_OpArrayCopy::src_pos_positive_check | 1901 LIR_OpArrayCopy::dst_pos_positive_check | 1902 LIR_OpArrayCopy::length_positive_check); 1903 1904 // Use only one conditional branch for simple checks. 1905 if (simple_check_flag_set) { 1906 ConditionRegister combined_check = CCR1, tmp_check = CCR1; 1907 1908 // Make sure src and dst are non-null. 1909 if (flags & LIR_OpArrayCopy::src_null_check) { 1910 __ cmpdi(combined_check, src, 0); 1911 tmp_check = CCR0; 1912 } 1913 1914 if (flags & LIR_OpArrayCopy::dst_null_check) { 1915 __ cmpdi(tmp_check, dst, 0); 1916 if (tmp_check != combined_check) { 1917 __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal); 1918 } 1919 tmp_check = CCR0; 1920 } 1921 1922 // Clear combined_check.eq if not already used. 1923 if (tmp_check == combined_check) { 1924 __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal); 1925 tmp_check = CCR0; 1926 } 1927 1928 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 1929 // Test src_pos register. 1930 __ cmpwi(tmp_check, src_pos, 0); 1931 __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less); 1932 } 1933 1934 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 1935 // Test dst_pos register. 1936 __ cmpwi(tmp_check, dst_pos, 0); 1937 __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less); 1938 } 1939 1940 if (flags & LIR_OpArrayCopy::length_positive_check) { 1941 // Make sure length isn't negative. 1942 __ cmpwi(tmp_check, length, 0); 1943 __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less); 1944 } 1945 1946 __ beq(combined_check, slow); 1947 } 1948 1949 // If the compiler was not able to prove that exact type of the source or the destination 1950 // of the arraycopy is an array type, check at runtime if the source or the destination is 1951 // an instance type. 
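  // An array klass has a negative layout helper, while instance klasses use
  // non-negative values, so a layout helper >= Klass::_lh_neutral_value
  // identifies a non-array type and sends the copy to the slow path.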
1952   if (flags & LIR_OpArrayCopy::type_check) {
1953     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
1954       __ load_klass(tmp, dst);
1955       __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
1956       __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
1957       __ bge(CCR0, slow);
1958     }
1959
1960     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
1961       __ load_klass(tmp, src);
1962       __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
1963       __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
1964       __ bge(CCR0, slow);
1965     }
1966   }
1967
1968   // The upper 32 bits must be zero.
1969   __ extsw(length, length);
1970
1971   __ extsw(src_pos, src_pos);
1972   if (flags & LIR_OpArrayCopy::src_range_check) {
1973     __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
1974     __ add(tmp, length, src_pos);
1975     __ cmpld(CCR0, tmp2, tmp);
1976     __ ble(CCR0, slow);
1977   }
1978
1979   __ extsw(dst_pos, dst_pos);
1980   if (flags & LIR_OpArrayCopy::dst_range_check) {
1981     __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
1982     __ add(tmp, length, dst_pos);
1983     __ cmpld(CCR0, tmp2, tmp);
1984     __ ble(CCR0, slow);
1985   }
1986
1987   int shift = shift_amount(basic_type);
1988
1989   if (!(flags & LIR_OpArrayCopy::type_check)) {
1990     __ b(cont);
1991   } else {
1992     // We don't know that the array types are compatible.
1993     if (basic_type != T_OBJECT) {
1994       // Simple test for basic type arrays.
1995       if (UseCompressedClassPointers) {
1996         // We don't need to decode, because we just need to compare.
1997         __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src);
1998         __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
1999         __ cmpw(CCR0, tmp, tmp2);
2000       } else {
2001         __ ld(tmp, oopDesc::klass_offset_in_bytes(), src);
2002         __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
2003         __ cmpd(CCR0, tmp, tmp2);
2004       }
2005       __ beq(CCR0, cont);
2006     } else {
2007       // For object arrays, if src is a subclass of dst then we can
2008       // safely do the copy.
2009       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
2010
2011       const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
2012       assert_different_registers(tmp, tmp2, sub_klass, super_klass);
2013
2014       __ load_klass(sub_klass, src);
2015       __ load_klass(super_klass, dst);
2016
2017       __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
2018                                        &cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);
2019
2020       address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
2021       //__ load_const_optimized(tmp, slow_stc, tmp2);
2022       __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
2023       __ mtctr(tmp);
2024       __ bctrl(); // sets CR0
2025       __ beq(CCR0, cont);
2026
2027       if (copyfunc_addr != NULL) { // Use stub if available.
2028         __ bind(copyfunc);
2029         // Src is not a subclass of dst so we have to do a
2030         // per-element check.
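        // The checkcast stub store-checks each element against the destination's
        // element klass. It follows the same return convention as the generic
        // stub (0 on success, ~(elements copied) otherwise), so after a partial
        // copy the positions and length are adjusted below and the remainder
        // falls through to the slow-path stub.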
2031 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2032 if ((flags & mask) != mask) { 2033 assert(flags & mask, "one of the two should be known to be an object array"); 2034 2035 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2036 __ load_klass(tmp, src); 2037 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2038 __ load_klass(tmp, dst); 2039 } 2040 2041 __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp); 2042 2043 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2044 __ load_const_optimized(tmp, objArray_lh); 2045 __ cmpw(CCR0, tmp, tmp2); 2046 __ bne(CCR0, slow); 2047 } 2048 2049 Register src_ptr = R3_ARG1; 2050 Register dst_ptr = R4_ARG2; 2051 Register len = R5_ARG3; 2052 Register chk_off = R6_ARG4; 2053 Register super_k = R7_ARG5; 2054 2055 __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type)); 2056 __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type)); 2057 if (shift == 0) { 2058 __ add(src_ptr, src_pos, src_ptr); 2059 __ add(dst_ptr, dst_pos, dst_ptr); 2060 } else { 2061 __ sldi(tmp, src_pos, shift); 2062 __ sldi(tmp2, dst_pos, shift); 2063 __ add(src_ptr, tmp, src_ptr); 2064 __ add(dst_ptr, tmp2, dst_ptr); 2065 } 2066 2067 __ load_klass(tmp, dst); 2068 __ mr(len, length); 2069 2070 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2071 __ ld(super_k, ek_offset, tmp); 2072 2073 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2074 __ lwz(chk_off, sco_offset, super_k); 2075 2076 __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0); 2077 2078 #ifndef PRODUCT 2079 if (PrintC1Statistics) { 2080 Label failed; 2081 __ cmpwi(CCR0, R3_RET, 0); 2082 __ bne(CCR0, failed); 2083 address counter = (address)&Runtime1::_arraycopy_checkcast_cnt; 2084 int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true); 2085 __ lwz(R11_scratch1, simm16_offs, tmp); 2086 __ addi(R11_scratch1, R11_scratch1, 1); 2087 __ stw(R11_scratch1, simm16_offs, tmp); 2088 __ bind(failed); 2089 } 2090 #endif 2091 2092 __ nand(tmp, R3_RET, R3_RET); 2093 __ cmpwi(CCR0, R3_RET, 0); 2094 __ beq(CCR0, *stub->continuation()); 2095 2096 #ifndef PRODUCT 2097 if (PrintC1Statistics) { 2098 address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt; 2099 int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true); 2100 __ lwz(R11_scratch1, simm16_offs, tmp); 2101 __ addi(R11_scratch1, R11_scratch1, 1); 2102 __ stw(R11_scratch1, simm16_offs, tmp); 2103 } 2104 #endif 2105 2106 __ subf(length, tmp, length); 2107 __ add(src_pos, tmp, src_pos); 2108 __ add(dst_pos, tmp, dst_pos); 2109 } 2110 } 2111 } 2112 __ bind(slow); 2113 __ b(*stub->entry()); 2114 __ bind(cont); 2115 2116 #ifdef ASSERT 2117 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2118 // Sanity check the known type with the incoming class. For the 2119 // primitive case the types must match exactly with src.klass and 2120 // dst.klass each exactly matching the default type. For the 2121 // object array case, if no type check is needed then either the 2122 // dst type is exactly the expected type and the src type is a 2123 // subtype which we can't check or src is the same array as dst 2124 // but not necessarily exactly of type default_type. 2125 Label known_ok, halt; 2126 metadata2reg(op->expected_type()->constant_encoding(), tmp); 2127 if (UseCompressedClassPointers) { 2128 // Tmp holds the default type. It currently comes uncompressed after the 2129 // load of a constant, so encode it. 
2130       __ encode_klass_not_null(tmp);
2131       // Load the raw (still compressed) value of the dst klass, since we
2132       // compare the stored klass words directly.
2133       __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
2134       __ cmpw(CCR0, tmp, tmp2);
2135       if (basic_type != T_OBJECT) {
2136         __ bne(CCR0, halt);
2137         // Load the raw value of the src klass.
2138         __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src);
2139         __ cmpw(CCR0, tmp, tmp2);
2140         __ beq(CCR0, known_ok);
2141       } else {
2142         __ beq(CCR0, known_ok);
2143         __ cmpw(CCR0, src, dst);
2144         __ beq(CCR0, known_ok);
2145       }
2146     } else {
2147       __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
2148       __ cmpd(CCR0, tmp, tmp2);
2149       if (basic_type != T_OBJECT) {
2150         __ bne(CCR0, halt);
2151         // Load the raw value of the src klass.
2152         __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src);
2153         __ cmpd(CCR0, tmp, tmp2);
2154         __ beq(CCR0, known_ok);
2155       } else {
2156         __ beq(CCR0, known_ok);
2157         __ cmpd(CCR0, src, dst);
2158         __ beq(CCR0, known_ok);
2159       }
2160     }
2161     __ bind(halt);
2162     __ stop("incorrect type information in arraycopy");
2163     __ bind(known_ok);
2164   }
2165 #endif
2166
2167 #ifndef PRODUCT
2168   if (PrintC1Statistics) {
2169     address counter = Runtime1::arraycopy_count_address(basic_type);
2170     int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2171     __ lwz(R11_scratch1, simm16_offs, tmp);
2172     __ addi(R11_scratch1, R11_scratch1, 1);
2173     __ stw(R11_scratch1, simm16_offs, tmp);
2174   }
2175 #endif
2176
2177   Register src_ptr = R3_ARG1;
2178   Register dst_ptr = R4_ARG2;
2179   Register len = R5_ARG3;
2180
2181   __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2182   __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2183   if (shift == 0) {
2184     __ add(src_ptr, src_pos, src_ptr);
2185     __ add(dst_ptr, dst_pos, dst_ptr);
2186   } else {
2187     __ sldi(tmp, src_pos, shift);
2188     __ sldi(tmp2, dst_pos, shift);
2189     __ add(src_ptr, tmp, src_ptr);
2190     __ add(dst_ptr, tmp2, dst_ptr);
2191   }
2192
2193   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2194   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2195   const char *name;
2196   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2197
2198   // Arraycopy stubs take a length in number of elements, so don't scale it.
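  // select_arraycopy_function returns a stub specialized for the element size
  // and for whether the ranges are disjoint and element-aligned, avoiding the
  // dispatch overhead of the generic stub.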
2199 __ mr(len, length); 2200 __ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0); 2201 2202 __ bind(*stub->continuation()); 2203 } 2204 2205 2206 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2207 if (dest->is_single_cpu()) { 2208 __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5); 2209 #ifdef _LP64 2210 if (left->type() == T_OBJECT) { 2211 switch (code) { 2212 case lir_shl: __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break; 2213 case lir_shr: __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break; 2214 case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break; 2215 default: ShouldNotReachHere(); 2216 } 2217 } else 2218 #endif 2219 switch (code) { 2220 case lir_shl: __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break; 2221 case lir_shr: __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break; 2222 case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break; 2223 default: ShouldNotReachHere(); 2224 } 2225 } else { 2226 __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6); 2227 switch (code) { 2228 case lir_shl: __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break; 2229 case lir_shr: __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break; 2230 case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break; 2231 default: ShouldNotReachHere(); 2232 } 2233 } 2234 } 2235 2236 2237 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2238 #ifdef _LP64 2239 if (left->type() == T_OBJECT) { 2240 count = count & 63; // Shouldn't shift by more than sizeof(intptr_t). 
2241 if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); } 2242 else { 2243 switch (code) { 2244 case lir_shl: __ sldi(dest->as_register_lo(), left->as_register(), count); break; 2245 case lir_shr: __ sradi(dest->as_register_lo(), left->as_register(), count); break; 2246 case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break; 2247 default: ShouldNotReachHere(); 2248 } 2249 } 2250 return; 2251 } 2252 #endif 2253 2254 if (dest->is_single_cpu()) { 2255 count = count & 0x1F; // Java spec 2256 if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); } 2257 else { 2258 switch (code) { 2259 case lir_shl: __ slwi(dest->as_register(), left->as_register(), count); break; 2260 case lir_shr: __ srawi(dest->as_register(), left->as_register(), count); break; 2261 case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break; 2262 default: ShouldNotReachHere(); 2263 } 2264 } 2265 } else if (dest->is_double_cpu()) { 2266 count = count & 63; // Java spec 2267 if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); } 2268 else { 2269 switch (code) { 2270 case lir_shl: __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break; 2271 case lir_shr: __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break; 2272 case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break; 2273 default: ShouldNotReachHere(); 2274 } 2275 } 2276 } else { 2277 ShouldNotReachHere(); 2278 } 2279 } 2280 2281 2282 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 2283 if (op->init_check()) { 2284 if (!os::zero_page_read_protected() || !ImplicitNullChecks) { 2285 explicit_null_check(op->klass()->as_register(), op->stub()->info()); 2286 } else { 2287 add_debug_info_for_null_check_here(op->stub()->info()); 2288 } 2289 __ lbz(op->tmp1()->as_register(), 2290 in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register()); 2291 __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized); 2292 __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry()); 2293 } 2294 __ allocate_object(op->obj()->as_register(), 2295 op->tmp1()->as_register(), 2296 op->tmp2()->as_register(), 2297 op->tmp3()->as_register(), 2298 op->header_size(), 2299 op->object_size(), 2300 op->klass()->as_register(), 2301 *op->stub()->entry()); 2302 2303 __ bind(*op->stub()->continuation()); 2304 __ verify_oop(op->obj()->as_register()); 2305 } 2306 2307 2308 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2309 LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); ) 2310 if (UseSlowPath || 2311 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || 2312 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { 2313 __ b(*op->stub()->entry()); 2314 } else { 2315 __ allocate_array(op->obj()->as_register(), 2316 op->len()->as_register(), 2317 op->tmp1()->as_register(), 2318 op->tmp2()->as_register(), 2319 op->tmp3()->as_register(), 2320 arrayOopDesc::header_size(op->type()), 2321 type2aelembytes(op->type()), 2322 op->klass()->as_register(), 2323 *op->stub()->entry()); 2324 } 2325 __ bind(*op->stub()->continuation()); 2326 } 2327 2328 2329 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias, 2330 ciMethodData *md, ciProfileData *data, 2331 Register recv, Register tmp1, Label* update_done) { 2332 uint i; 2333 
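  // A ReceiverTypeData entry holds row_limit() (receiver klass, count) rows.
  // The first loop below bumps the count of a row that already matches the
  // receiver; the second loop claims the first empty row for a new receiver.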
for (i = 0; i < VirtualCallData::row_limit(); i++) { 2334 Label next_test; 2335 // See if the receiver is receiver[n]. 2336 __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo); 2337 __ verify_klass_ptr(tmp1); 2338 __ cmpd(CCR0, recv, tmp1); 2339 __ bne(CCR0, next_test); 2340 2341 __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2342 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2343 __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2344 __ b(*update_done); 2345 2346 __ bind(next_test); 2347 } 2348 2349 // Didn't find receiver; find next empty slot and fill it in. 2350 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2351 Label next_test; 2352 __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo); 2353 __ cmpdi(CCR0, tmp1, 0); 2354 __ bne(CCR0, next_test); 2355 __ li(tmp1, DataLayout::counter_increment); 2356 __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo); 2357 __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2358 __ b(*update_done); 2359 2360 __ bind(next_test); 2361 } 2362 } 2363 2364 2365 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2366 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2367 md = method->method_data_or_null(); 2368 assert(md != NULL, "Sanity"); 2369 data = md->bci_to_data(bci); 2370 assert(data != NULL, "need data for checkcast"); 2371 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2372 if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) { 2373 // The offset is large so bias the mdo by the base of the slot so 2374 // that the ld can use simm16s to reference the slots of the data. 2375 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset()); 2376 } 2377 } 2378 2379 2380 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2381 Register obj = op->object()->as_register(); 2382 Register k_RInfo = op->tmp1()->as_register(); 2383 Register klass_RInfo = op->tmp2()->as_register(); 2384 Register Rtmp1 = op->tmp3()->as_register(); 2385 Register dst = op->result_opr()->as_register(); 2386 ciKlass* k = op->klass(); 2387 bool should_profile = op->should_profile(); 2388 bool move_obj_to_dst = (op->code() == lir_checkcast); 2389 // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be same as one of the temps. 
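  // For checkcast the object is moved into dst up front; if obj aliases one
  // of the temps, the code keeps working on dst instead and restores the
  // original object register from it on the failure path (restore_obj below).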
2390 bool reg_conflict = (obj == k_RInfo || obj == klass_RInfo || obj == Rtmp1); 2391 bool restore_obj = move_obj_to_dst && reg_conflict; 2392 2393 __ cmpdi(CCR0, obj, 0); 2394 if (move_obj_to_dst || reg_conflict) { 2395 __ mr_if_needed(dst, obj); 2396 if (reg_conflict) { obj = dst; } 2397 } 2398 2399 ciMethodData* md; 2400 ciProfileData* data; 2401 int mdo_offset_bias = 0; 2402 if (should_profile) { 2403 ciMethod* method = op->profiled_method(); 2404 assert(method != NULL, "Should have method"); 2405 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2406 2407 Register mdo = k_RInfo; 2408 Register data_val = Rtmp1; 2409 Label not_null; 2410 __ bne(CCR0, not_null); 2411 metadata2reg(md->constant_encoding(), mdo); 2412 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2413 __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo); 2414 __ ori(data_val, data_val, BitData::null_seen_byte_constant()); 2415 __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo); 2416 __ b(*obj_is_null); 2417 __ bind(not_null); 2418 } else { 2419 __ beq(CCR0, *obj_is_null); 2420 } 2421 2422 // get object class 2423 __ load_klass(klass_RInfo, obj); 2424 2425 if (k->is_loaded()) { 2426 metadata2reg(k->constant_encoding(), k_RInfo); 2427 } else { 2428 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 2429 } 2430 2431 Label profile_cast_failure, failure_restore_obj, profile_cast_success; 2432 Label *failure_target = should_profile ? &profile_cast_failure : failure; 2433 Label *success_target = should_profile ? &profile_cast_success : success; 2434 2435 if (op->fast_check()) { 2436 assert_different_registers(klass_RInfo, k_RInfo); 2437 __ cmpd(CCR0, k_RInfo, klass_RInfo); 2438 if (should_profile) { 2439 __ bne(CCR0, *failure_target); 2440 // Fall through to success case. 2441 } else { 2442 __ beq(CCR0, *success); 2443 // Fall through to failure case. 2444 } 2445 } else { 2446 bool need_slow_path = true; 2447 if (k->is_loaded()) { 2448 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) { 2449 need_slow_path = false; 2450 } 2451 // Perform the fast part of the checking logic. 2452 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL), 2453 failure_target, NULL, RegisterOrConstant(k->super_check_offset())); 2454 } else { 2455 // Perform the fast part of the checking logic. 2456 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target); 2457 } 2458 if (!need_slow_path) { 2459 if (!should_profile) { __ b(*success); } 2460 } else { 2461 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2462 address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id); 2463 //__ load_const_optimized(Rtmp1, entry, R0); 2464 __ calculate_address_from_global_toc(Rtmp1, entry, true, true, false); 2465 __ mtctr(Rtmp1); 2466 __ bctrl(); // sets CR0 2467 if (should_profile) { 2468 __ bne(CCR0, *failure_target); 2469 // Fall through to success case. 2470 } else { 2471 __ beq(CCR0, *success); 2472 // Fall through to failure case. 
2473 } 2474 } 2475 } 2476 2477 if (should_profile) { 2478 Register mdo = k_RInfo, recv = klass_RInfo; 2479 assert_different_registers(mdo, recv, Rtmp1); 2480 __ bind(profile_cast_success); 2481 metadata2reg(md->constant_encoding(), mdo); 2482 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2483 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success); 2484 __ b(*success); 2485 2486 // Cast failure case. 2487 __ bind(profile_cast_failure); 2488 metadata2reg(md->constant_encoding(), mdo); 2489 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2490 __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2491 __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment); 2492 __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2493 } 2494 2495 __ bind(*failure); 2496 2497 if (restore_obj) { 2498 __ mr(op->object()->as_register(), dst); 2499 // Fall through to failure case. 2500 } 2501 } 2502 2503 2504 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2505 LIR_Code code = op->code(); 2506 if (code == lir_store_check) { 2507 Register value = op->object()->as_register(); 2508 Register array = op->array()->as_register(); 2509 Register k_RInfo = op->tmp1()->as_register(); 2510 Register klass_RInfo = op->tmp2()->as_register(); 2511 Register Rtmp1 = op->tmp3()->as_register(); 2512 bool should_profile = op->should_profile(); 2513 2514 __ verify_oop(value); 2515 CodeStub* stub = op->stub(); 2516 // Check if it needs to be profiled. 2517 ciMethodData* md; 2518 ciProfileData* data; 2519 int mdo_offset_bias = 0; 2520 if (should_profile) { 2521 ciMethod* method = op->profiled_method(); 2522 assert(method != NULL, "Should have method"); 2523 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2524 } 2525 Label profile_cast_success, failure, done; 2526 Label *success_target = should_profile ? &profile_cast_success : &done; 2527 2528 __ cmpdi(CCR0, value, 0); 2529 if (should_profile) { 2530 Label not_null; 2531 __ bne(CCR0, not_null); 2532 Register mdo = k_RInfo; 2533 Register data_val = Rtmp1; 2534 metadata2reg(md->constant_encoding(), mdo); 2535 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2536 __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo); 2537 __ ori(data_val, data_val, BitData::null_seen_byte_constant()); 2538 __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo); 2539 __ b(done); 2540 __ bind(not_null); 2541 } else { 2542 __ beq(CCR0, done); 2543 } 2544 if (!os::zero_page_read_protected() || !ImplicitNullChecks) { 2545 explicit_null_check(array, op->info_for_exception()); 2546 } else { 2547 add_debug_info_for_null_check_here(op->info_for_exception()); 2548 } 2549 __ load_klass(k_RInfo, array); 2550 __ load_klass(klass_RInfo, value); 2551 2552 // Get instance klass. 2553 __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo); 2554 // Perform the fast part of the checking logic. 
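    // The fast path usually decides via the super check slot in the klass; only
    // when it is inconclusive do we call the out-of-line slow subtype check,
    // which scans the secondary supers and sets CR0.eq on success.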
2555 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL); 2556 2557 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2558 const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id); 2559 //__ load_const_optimized(R0, slow_path); 2560 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path)); 2561 __ mtctr(R0); 2562 __ bctrl(); // sets CR0 2563 if (!should_profile) { 2564 __ beq(CCR0, done); 2565 __ bind(failure); 2566 } else { 2567 __ bne(CCR0, failure); 2568 // Fall through to the success case. 2569 2570 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2571 assert_different_registers(value, mdo, recv, tmp1); 2572 __ bind(profile_cast_success); 2573 metadata2reg(md->constant_encoding(), mdo); 2574 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2575 __ load_klass(recv, value); 2576 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done); 2577 __ b(done); 2578 2579 // Cast failure case. 2580 __ bind(failure); 2581 metadata2reg(md->constant_encoding(), mdo); 2582 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2583 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2584 __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2585 __ addi(tmp1, tmp1, -DataLayout::counter_increment); 2586 __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2587 } 2588 __ b(*stub->entry()); 2589 __ bind(done); 2590 2591 } else if (code == lir_checkcast) { 2592 Label success, failure; 2593 emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success); // Moves obj to dst. 2594 __ b(*op->stub()->entry()); 2595 __ align(32, 12); 2596 __ bind(success); 2597 } else if (code == lir_instanceof) { 2598 Register dst = op->result_opr()->as_register(); 2599 Label success, failure, done; 2600 emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure); 2601 __ li(dst, 0); 2602 __ b(done); 2603 __ align(32, 12); 2604 __ bind(success); 2605 __ li(dst, 1); 2606 __ bind(done); 2607 } else { 2608 ShouldNotReachHere(); 2609 } 2610 } 2611 2612 2613 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2614 Register addr = op->addr()->as_pointer_register(); 2615 Register cmp_value = noreg, new_value = noreg; 2616 bool is_64bit = false; 2617 2618 if (op->code() == lir_cas_long) { 2619 cmp_value = op->cmp_value()->as_register_lo(); 2620 new_value = op->new_value()->as_register_lo(); 2621 is_64bit = true; 2622 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2623 cmp_value = op->cmp_value()->as_register(); 2624 new_value = op->new_value()->as_register(); 2625 if (op->code() == lir_cas_obj) { 2626 if (UseCompressedOops) { 2627 Register t1 = op->tmp1()->as_register(); 2628 Register t2 = op->tmp2()->as_register(); 2629 cmp_value = __ encode_heap_oop(t1, cmp_value); 2630 new_value = __ encode_heap_oop(t2, new_value); 2631 } else { 2632 is_64bit = true; 2633 } 2634 } 2635 } else { 2636 Unimplemented(); 2637 } 2638 2639 if (is_64bit) { 2640 __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr, 2641 MacroAssembler::MemBarNone, 2642 MacroAssembler::cmpxchgx_hint_atomic_update(), 2643 noreg, NULL, /*check without ldarx first*/true); 2644 } else { 2645 __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr, 2646 MacroAssembler::MemBarNone, 2647 
MacroAssembler::cmpxchgx_hint_atomic_update(),
2648                 noreg, /*check without ldarx first*/true);
2649   }
2650
2651   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2652     __ isync();
2653   } else {
2654     __ sync();
2655   }
2656 }
2657
2658
2659 void LIR_Assembler::set_24bit_FPU() {
2660   Unimplemented();
2661 }
2662
2663 void LIR_Assembler::reset_FPU() {
2664   Unimplemented();
2665 }
2666
2667
2668 void LIR_Assembler::breakpoint() {
2669   __ illtrap();
2670 }
2671
2672
2673 void LIR_Assembler::push(LIR_Opr opr) {
2674   Unimplemented();
2675 }
2676
2677 void LIR_Assembler::pop(LIR_Opr opr) {
2678   Unimplemented();
2679 }
2680
2681
2682 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2683   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2684   Register dst = dst_opr->as_register();
2685   Register reg = mon_addr.base();
2686   int offset = mon_addr.disp();
2687   // Compute pointer to BasicLock.
2688   __ add_const_optimized(dst, reg, offset);
2689 }
2690
2691
2692 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2693   Register obj = op->obj_opr()->as_register();
2694   Register hdr = op->hdr_opr()->as_register();
2695   Register lock = op->lock_opr()->as_register();
2696
2697   // Obj may not be an oop.
2698   if (op->code() == lir_lock) {
2699     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2700     if (UseFastLocking) {
2701       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2702       // Add debug info for NullPointerException only if one is possible.
2703       if (op->info() != NULL) {
2704         if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2705           explicit_null_check(obj, op->info());
2706         } else {
2707           add_debug_info_for_null_check_here(op->info());
2708         }
2709       }
2710       __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2711     } else {
2712       // Always do slow locking.
2713       // Note: The slow locking code could be inlined here; however, if we use
2714       // slow locking, speed doesn't matter anyway, and this solution is
2715       // simpler and requires less duplicated code. Additionally, the
2716       // slow locking code is the same in either case, which simplifies
2717       // debugging.
2718       __ b(*op->stub()->entry());
2719     }
2720   } else {
2721     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2722     if (UseFastLocking) {
2723       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2724       __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2725     } else {
2726       // Always do slow unlocking.
2727       // Note: The slow unlocking code could be inlined here; however, if we use
2728       // slow unlocking, speed doesn't matter anyway, and this solution is
2729       // simpler and requires less duplicated code. Additionally, the
2730       // slow unlocking code is the same in either case, which simplifies
2731       // debugging.
2732       __ b(*op->stub()->entry());
2733     }
2734   }
2735   __ bind(*op->stub()->continuation());
2736 }
2737
2738
2739 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2740   ciMethod* method = op->profiled_method();
2741   int bci = op->profiled_bci();
2742   ciMethod* callee = op->profiled_callee();
2743
2744   // Update counter for all call types.
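  // MethodData slots are addressed with signed 16-bit displacements from the
  // mdo register; if a slot is out of reach, the mdo is biased once here and
  // mdo_offset_bias is subtracted from every slot offset used below.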
2745 ciMethodData* md = method->method_data_or_null(); 2746 assert(md != NULL, "Sanity"); 2747 ciProfileData* data = md->bci_to_data(bci); 2748 assert(data != NULL && data->is_CounterData(), "need CounterData for calls"); 2749 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2750 Register mdo = op->mdo()->as_register(); 2751 #ifdef _LP64 2752 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2753 Register tmp1 = op->tmp1()->as_register_lo(); 2754 #else 2755 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated"); 2756 Register tmp1 = op->tmp1()->as_register(); 2757 #endif 2758 metadata2reg(md->constant_encoding(), mdo); 2759 int mdo_offset_bias = 0; 2760 if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) + 2761 data->size_in_bytes())) { 2762 // The offset is large so bias the mdo by the base of the slot so 2763 // that the ld can use simm16s to reference the slots of the data. 2764 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 2765 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2766 } 2767 2768 // Perform additional virtual call profiling for invokevirtual and 2769 // invokeinterface bytecodes 2770 if (op->should_profile_receiver_type()) { 2771 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2772 Register recv = op->recv()->as_register(); 2773 assert_different_registers(mdo, tmp1, recv); 2774 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2775 ciKlass* known_klass = op->known_holder(); 2776 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 2777 // We know the type that will be seen at this call site; we can 2778 // statically update the MethodData* rather than needing to do 2779 // dynamic tests on the receiver type. 2780 2781 // NOTE: we should probably put a lock around this search to 2782 // avoid collisions by concurrent compilations. 2783 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2784 uint i; 2785 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2786 ciKlass* receiver = vc_data->receiver(i); 2787 if (known_klass->equals(receiver)) { 2788 __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2789 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2790 __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2791 return; 2792 } 2793 } 2794 2795 // Receiver type not found in profile data; select an empty slot. 2796 2797 // Note that this is less efficient than it should be because it 2798 // always does a write to the receiver part of the 2799 // VirtualCallData rather than just the first time. 
2800 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2801 ciKlass* receiver = vc_data->receiver(i); 2802 if (receiver == NULL) { 2803 metadata2reg(known_klass->constant_encoding(), tmp1); 2804 __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo); 2805 2806 __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2807 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2808 __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2809 return; 2810 } 2811 } 2812 } else { 2813 __ load_klass(recv, recv); 2814 Label update_done; 2815 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 2816 // Receiver did not match any saved receiver and there is no empty row for it. 2817 // Increment total counter to indicate polymorphic case. 2818 __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2819 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2820 __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2821 2822 __ bind(update_done); 2823 } 2824 } else { 2825 // Static call 2826 __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2827 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2828 __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2829 } 2830 } 2831 2832 2833 void LIR_Assembler::align_backward_branch_target() { 2834 __ align(32, 12); // Insert up to 3 nops to align with 32 byte boundary. 2835 } 2836 2837 2838 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { 2839 Unimplemented(); 2840 } 2841 2842 2843 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { 2844 assert(left->is_register(), "can only handle registers"); 2845 2846 if (left->is_single_cpu()) { 2847 __ neg(dest->as_register(), left->as_register()); 2848 } else if (left->is_single_fpu()) { 2849 __ fneg(dest->as_float_reg(), left->as_float_reg()); 2850 } else if (left->is_double_fpu()) { 2851 __ fneg(dest->as_double_reg(), left->as_double_reg()); 2852 } else { 2853 assert (left->is_double_cpu(), "Must be a long"); 2854 __ neg(dest->as_register_lo(), left->as_register_lo()); 2855 } 2856 } 2857 2858 2859 void LIR_Assembler::fxch(int i) { 2860 Unimplemented(); 2861 } 2862 2863 void LIR_Assembler::fld(int i) { 2864 Unimplemented(); 2865 } 2866 2867 void LIR_Assembler::ffree(int i) { 2868 Unimplemented(); 2869 } 2870 2871 2872 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 2873 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2874 // Stubs: Called via rt_call, but dest is a stub address (no function descriptor). 2875 if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) || 2876 dest == Runtime1::entry_for(Runtime1::new_multi_array_id )) { 2877 //__ load_const_optimized(R0, dest); 2878 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest)); 2879 __ mtctr(R0); 2880 __ bctrl(); 2881 assert(info != NULL, "sanity"); 2882 add_call_info_here(info); 2883 return; 2884 } 2885 2886 __ call_c_with_frame_resize(dest, /*no resizing*/ 0); 2887 if (info != NULL) { 2888 add_call_info_here(info); 2889 } 2890 } 2891 2892 2893 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2894 ShouldNotReachHere(); // Not needed on _LP64. 
2895 } 2896 2897 void LIR_Assembler::membar() { 2898 __ fence(); 2899 } 2900 2901 void LIR_Assembler::membar_acquire() { 2902 __ acquire(); 2903 } 2904 2905 void LIR_Assembler::membar_release() { 2906 __ release(); 2907 } 2908 2909 void LIR_Assembler::membar_loadload() { 2910 __ membar(Assembler::LoadLoad); 2911 } 2912 2913 void LIR_Assembler::membar_storestore() { 2914 __ membar(Assembler::StoreStore); 2915 } 2916 2917 void LIR_Assembler::membar_loadstore() { 2918 __ membar(Assembler::LoadStore); 2919 } 2920 2921 void LIR_Assembler::membar_storeload() { 2922 __ membar(Assembler::StoreLoad); 2923 } 2924 2925 void LIR_Assembler::on_spin_wait() { 2926 Unimplemented(); 2927 } 2928 2929 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) { 2930 assert(patch_code == lir_patch_none, "Patch code not supported"); 2931 LIR_Address* addr = addr_opr->as_address_ptr(); 2932 assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform"); 2933 if (addr->index()->is_illegal()) { 2934 __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp()); 2935 } else { 2936 assert(addr->disp() == 0, "can't have both: index and disp"); 2937 __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register()); 2938 } 2939 } 2940 2941 2942 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2943 ShouldNotReachHere(); 2944 } 2945 2946 2947 #ifdef ASSERT 2948 // Emit run-time assertion. 2949 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2950 Unimplemented(); 2951 } 2952 #endif 2953 2954 2955 void LIR_Assembler::peephole(LIR_List* lir) { 2956 // Optimize instruction pairs before emitting. 2957 LIR_OpList* inst = lir->instructions_list(); 2958 for (int i = 1; i < inst->length(); i++) { 2959 LIR_Op* op = inst->at(i); 2960 2961 // 2 register-register-moves 2962 if (op->code() == lir_move) { 2963 LIR_Opr in2 = ((LIR_Op1*)op)->in_opr(), 2964 res2 = ((LIR_Op1*)op)->result_opr(); 2965 if (in2->is_register() && res2->is_register()) { 2966 LIR_Op* prev = inst->at(i - 1); 2967 if (prev && prev->code() == lir_move) { 2968 LIR_Opr in1 = ((LIR_Op1*)prev)->in_opr(), 2969 res1 = ((LIR_Op1*)prev)->result_opr(); 2970 if (in1->is_same_register(res2) && in2->is_same_register(res1)) { 2971 inst->remove_at(i); 2972 } 2973 } 2974 } 2975 } 2976 2977 } 2978 return; 2979 } 2980 2981 2982 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 2983 const LIR_Address *addr = src->as_address_ptr(); 2984 assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!"); 2985 const Register Rptr = addr->base()->as_pointer_register(), 2986 Rtmp = tmp->as_register(); 2987 Register Rco = noreg; 2988 if (UseCompressedOops && data->is_oop()) { 2989 Rco = __ encode_heap_oop(Rtmp, data->as_register()); 2990 } 2991 2992 Label Lretry; 2993 __ bind(Lretry); 2994 2995 if (data->type() == T_INT) { 2996 const Register Rold = dest->as_register(), 2997 Rsrc = data->as_register(); 2998 assert_different_registers(Rptr, Rtmp, Rold, Rsrc); 2999 __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update()); 3000 if (code == lir_xadd) { 3001 __ add(Rtmp, Rsrc, Rold); 3002 __ stwcx_(Rtmp, Rptr); 3003 } else { 3004 __ stwcx_(Rsrc, Rptr); 3005 } 3006 } else if (data->is_oop()) { 3007 assert(code == lir_xchg, "xadd for oops"); 3008 const Register Rold = dest->as_register(); 3009 if (UseCompressedOops) { 3010 assert_different_registers(Rptr, Rold, Rco); 3011 __ 
lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
3012       __ stwcx_(Rco, Rptr);
3013     } else {
3014       const Register Robj = data->as_register();
3015       assert_different_registers(Rptr, Rold, Robj);
3016       __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
3017       __ stdcx_(Robj, Rptr);
3018     }
3019   } else if (data->type() == T_LONG) {
3020     const Register Rold = dest->as_register_lo(),
3021                    Rsrc = data->as_register_lo();
3022     assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
3023     __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
3024     if (code == lir_xadd) {
3025       __ add(Rtmp, Rsrc, Rold);
3026       __ stdcx_(Rtmp, Rptr);
3027     } else {
3028       __ stdcx_(Rsrc, Rptr);
3029     }
3030   } else {
3031     ShouldNotReachHere();
3032   }
3033
3034   if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
3035     __ bne_predict_not_taken(CCR0, Lretry);
3036   } else {
3037     __ bne( CCR0, Lretry);
3038   }
3039
3040   if (UseCompressedOops && data->is_oop()) {
3041     __ decode_heap_oop(dest->as_register());
3042   }
3043 }
3044
3045
3046 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
3047   Register obj = op->obj()->as_register();
3048   Register tmp = op->tmp()->as_pointer_register();
3049   LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
3050   ciKlass* exact_klass = op->exact_klass();
3051   intptr_t current_klass = op->current_klass();
3052   bool not_null = op->not_null();
3053   bool no_conflict = op->no_conflict();
3054
3055   Label Lupdate, Ldo_update, Ldone;
3056
3057   bool do_null = !not_null;
3058   bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
3059   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
3060
3061   assert(do_null || do_update, "why are we here?");
3062   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
3063
3064   __ verify_oop(obj);
3065
3066   if (do_null) {
3067     if (!TypeEntries::was_null_seen(current_klass)) {
3068       __ cmpdi(CCR0, obj, 0);
3069       __ bne(CCR0, Lupdate);
3070       __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3071       __ ori(R0, R0, TypeEntries::null_seen);
3072       if (do_update) {
3073         __ b(Ldo_update);
3074       } else {
3075         __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3076       }
3077     } else {
3078       if (do_update) {
3079         __ cmpdi(CCR0, obj, 0);
3080         __ beq(CCR0, Ldone);
3081       }
3082     }
3083 #ifdef ASSERT
3084   } else {
3085     __ cmpdi(CCR0, obj, 0);
3086     __ bne(CCR0, Lupdate);
3087     __ stop("unexpected null obj", 0x9652);
3088 #endif
3089   }
3090
3091   __ bind(Lupdate);
3092   if (do_update) {
3093     Label Lnext;
3094     const Register klass = R29_TOC; // kill and reload
3095     bool klass_reg_used = false;
3096 #ifdef ASSERT
3097     if (exact_klass != NULL) {
3098       Label ok;
3099       klass_reg_used = true;
3100       __ load_klass(klass, obj);
3101       metadata2reg(exact_klass->constant_encoding(), R0);
3102       __ cmpd(CCR0, klass, R0);
3103       __ beq(CCR0, ok);
3104       __ stop("exact klass and actual klass differ", 0x8564);
3105       __ bind(ok);
3106     }
3107 #endif
3108
3109     if (!no_conflict) {
3110       if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
3111         klass_reg_used = true;
3112         if (exact_klass != NULL) {
3113           __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3114           metadata2reg(exact_klass->constant_encoding(), klass);
3115         } else {
3116           __ load_klass(klass, obj);
3117           __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // may kill obj
3118         }
3119
3120         // Like 
InterpreterMacroAssembler::profile_obj_type 3121 __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask)); 3122 // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask); 3123 __ cmpd(CCR1, R0, klass); 3124 // Klass seen before, nothing to do (regardless of unknown bit). 3125 //beq(CCR1, do_nothing); 3126 3127 __ andi_(R0, klass, TypeEntries::type_unknown); 3128 // Already unknown. Nothing to do anymore. 3129 //bne(CCR0, do_nothing); 3130 __ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne 3131 __ beq(CCR0, Lnext); 3132 3133 if (TypeEntries::is_type_none(current_klass)) { 3134 __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask)); 3135 __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0). 3136 __ beq(CCR0, Ldo_update); // First time here. Set profile type. 3137 } 3138 3139 } else { 3140 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3141 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3142 3143 __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); 3144 __ andi_(R0, tmp, TypeEntries::type_unknown); 3145 // Already unknown. Nothing to do anymore. 3146 __ bne(CCR0, Lnext); 3147 } 3148 3149 // Different than before. Cannot keep accurate profile. 3150 __ ori(R0, tmp, TypeEntries::type_unknown); 3151 } else { 3152 // There's a single possible klass at this profile point 3153 assert(exact_klass != NULL, "should be"); 3154 __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); 3155 3156 if (TypeEntries::is_type_none(current_klass)) { 3157 klass_reg_used = true; 3158 metadata2reg(exact_klass->constant_encoding(), klass); 3159 3160 __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask)); 3161 // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask); 3162 __ cmpd(CCR1, R0, klass); 3163 // Klass seen before, nothing to do (regardless of unknown bit). 3164 __ beq(CCR1, Lnext); 3165 #ifdef ASSERT 3166 { 3167 Label ok; 3168 __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask)); 3169 __ beq(CCR0, ok); // First time here. 3170 3171 __ stop("unexpected profiling mismatch", 0x7865); 3172 __ bind(ok); 3173 } 3174 #endif 3175 // First time here. Set profile type. 3176 __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0). 3177 } else { 3178 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3179 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3180 3181 // Already unknown. Nothing to do anymore. 3182 __ andi_(R0, tmp, TypeEntries::type_unknown); 3183 __ bne(CCR0, Lnext); 3184 3185 // Different than before. Cannot keep accurate profile. 
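        // A type entry is a single machine word: the klass pointer with the
        // TypeEntries status flags (null_seen, type_unknown) folded into its
        // low alignment bits; a zero word (type_none) means nothing has been
        // recorded yet.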
3186 __ ori(R0, tmp, TypeEntries::type_unknown); 3187 } 3188 } 3189 3190 __ bind(Ldo_update); 3191 __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); 3192 3193 __ bind(Lnext); 3194 if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit 3195 } 3196 __ bind(Ldone); 3197 } 3198 3199 3200 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3201 assert(op->crc()->is_single_cpu(), "crc must be register"); 3202 assert(op->val()->is_single_cpu(), "byte value must be register"); 3203 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3204 Register crc = op->crc()->as_register(); 3205 Register val = op->val()->as_register(); 3206 Register res = op->result_opr()->as_register(); 3207 3208 assert_different_registers(val, crc, res); 3209 3210 __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0); 3211 __ kernel_crc32_singleByteReg(crc, val, res, true); 3212 __ mr(res, crc); 3213 } 3214 3215 #undef __