/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  Unimplemented(); return false; // Currently not used on this platform.
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::R3_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::R3_opr;
}


// This specifies the stack pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
  return offset;
}

void LIR_Assembler::clinit_barrier(ciMethod* method) {
  ShouldNotReachHere(); // not implemented
}

void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //  1. Create a new compiled activation.
  //  2. Initialize local variables in the compiled activation. The expression
  //     stack must be empty at the osr_bci; it is not initialized.
  //  3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
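  // build_frame is expected to bang the stack by bang_size_in_bytes() as
  // well, provoking any stack overflow before the frame is committed.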
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
  // in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();
  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  //__ load_const_optimized(R0, entry_point);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
  __ mtctr(R0);
  __ bctr();

  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
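// The exception oop is expected in R3, both on entry and when branching
// here via _unwind_handler_entry (see unwind_op below).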
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");

  int offset = code_offset();
  bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
  const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;

  // Fetch the exception from TLS and clear out exception related thread state.
  __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ li(R0, 0);
  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception);
  if (preserve_exception) { __ mr(Rexception_save, Rexception); }

  // Perform needed unlocking.
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R4_opr);
    stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
    __ unlock_object(R5, R6, R4, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Unimplemented();
  }

  // Dispatch to the unwind logic.
  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
  //__ load_const_optimized(R0, unwind_stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
  if (preserve_exception) { __ mr(Rexception, Rexception_save); }
  __ mtctr(R0);
  __ bctr();

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ li(reg, 0);
  } else {
    AddressLiteral addrlit = __ constant_oop_address(o);
    __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the table to hold the object once it's been patched.
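  // Passing NULL only reserves the index; the real oop is installed
  // later when the code is patched.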
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation)
  __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  const bool is_int = result->is_single_cpu();
  Register Rdividend = is_int ? left->as_register() : left->as_register_lo();
  Register Rdivisor  = noreg;
  Register Rscratch  = temp->as_register();
  Register Rresult   = is_int ? result->as_register() : result->as_register_lo();
  long divisor = -1;

  if (right->is_register()) {
    Rdivisor = is_int ? right->as_register() : right->as_register_lo();
  } else {
    divisor = is_int ? right->as_constant_ptr()->as_jint()
                     : right->as_constant_ptr()->as_jlong();
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(code == lir_idiv || code == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg) {
    if (divisor == 1) { // stupid, but can happen
      if (code == lir_idiv) {
        __ mr_if_needed(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else if (is_power_of_2(divisor)) {
      // Convert division by a power of two into some shifts and logical operations.
      int log2 = log2_intptr(divisor);

      // Round towards 0.
      if (divisor == 2) {
        if (is_int) {
          __ srwi(Rscratch, Rdividend, 31);
        } else {
          __ srdi(Rscratch, Rdividend, 63);
        }
      } else {
        if (is_int) {
          __ srawi(Rscratch, Rdividend, 31);
        } else {
          __ sradi(Rscratch, Rdividend, 63);
        }
        __ clrldi(Rscratch, Rscratch, 64-log2);
      }
      __ add(Rscratch, Rdividend, Rscratch);

      if (code == lir_idiv) {
        if (is_int) {
          __ srawi(Rresult, Rscratch, log2);
        } else {
          __ sradi(Rresult, Rscratch, log2);
        }
      } else { // lir_irem
        __ clrrdi(Rscratch, Rscratch, log2);
        __ sub(Rresult, Rdividend, Rscratch);
      }

    } else if (divisor == -1) {
      if (code == lir_idiv) {
        __ neg(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else {
      __ load_const_optimized(Rscratch, divisor);
      if (code == lir_idiv) {
        if (is_int) {
          __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        } else {
          __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
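          // (Cannot overflow here: constant divisors of 1, -1, and powers
          // of two were all dispatched above.)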
        }
      } else {
        assert(Rscratch != R0, "need both");
        if (is_int) {
          __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mullw(Rscratch, R0, Rscratch);
        } else {
          __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mulld(Rscratch, R0, Rscratch);
        }
        __ sub(Rresult, Rdividend, Rscratch);
      }

    }
    return;
  }

  Label regular, done;
  if (is_int) {
    __ cmpwi(CCR0, Rdivisor, -1);
  } else {
    __ cmpdi(CCR0, Rdivisor, -1);
  }
  __ bne(CCR0, regular);
  if (code == lir_idiv) {
    __ neg(Rresult, Rdividend);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    } else {
      __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    }
  } else { // lir_irem
    __ li(Rresult, 0);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mullw(Rscratch, Rscratch, Rdivisor);
    } else {
      __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mulld(Rscratch, Rscratch, Rdivisor);
    }
    __ sub(Rresult, Rdividend, Rscratch);
  }
  __ bind(done);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
                      op->result_opr(), op->info());
      break;
    case lir_fmad:
      __ fmadd(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg());
      break;
    case lir_fmaf:
      __ fmadds(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(),
                op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
#endif

  Label *L = op->label();
  if (op->cond() == lir_cond_always) {
    __ b(*L);
  } else {
    Label done;
    bool is_unordered = false;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      is_unordered = true;
    } else {
      assert(op->code() == lir_branch, "just checking");
    }

    bool positive = false;
    Assembler::Condition cond = Assembler::equal;
    switch (op->cond()) {
      case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
      case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
      case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
      case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
      default:                    ShouldNotReachHere();
    }
    int bo = positive ?
             Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
    int bi = Assembler::bi0(BOOL_RESULT, cond);
    if (is_unordered) {
      if (positive) {
        if (op->ublock() == op->block()) {
          __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
        }
      } else {
        if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
      }
    }
    __ bc_far_optimized(bo, bi, *L);
    __ bind(done);
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr src = op->in_opr(),
          dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_double_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2d) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      __ fcfid(rdst, rsrc);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_l2f: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2f) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      if (VM_Version::has_fcfids()) {
        __ fcfids(rdst, rsrc);
      } else {
        assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
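      // fcmpu sets the unordered bit of CCR0 iff an operand is NaN;
      // the bso below branches on it and skips the conversion.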
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NaN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register(), 0);
      }
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register(), rsrc);
      }
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NaN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register_lo(), 0);
      }
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register_lo(), rsrc);
      }
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on ppc
}


bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be related to the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(rtype == relocInfo::opt_virtual_call_type || rtype == relocInfo::static_call_type, "unexpected rtype");

  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ calculate_address_from_global_toc(R2_TOC, __ method_toc());

  // Virtual call relocation will point to the ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
  // Call to the fixup routine. The fixup routine uses ScopeDesc info
  // to determine who we intended to call.
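  // The relocation records virtual_call_meta_addr (the address of the ic
  // load above) so the runtime can find and patch the inline cache.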
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere(); // ic_call is used instead.
}


void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
  __ null_check(addr, stub->entry());
  append_code_stub(stub);
}


// Attention: caller must encode oop if needed.
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset.
    assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
    __ load_const_optimized(R0, offset);
    store_offset = store(from_reg, base, R0, type, wide);
  } else {
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
      case T_CHAR  :
      case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
      case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
      case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
      case T_ADDRESS:
      case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            // Encoding done in caller.
            __ stw(from_reg->as_register(), offset, base);
          } else {
            __ std(from_reg->as_register(), offset, base);
          }
          __ verify_oop(from_reg->as_register());
          break;
        }
      case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


// Attention: caller must encode oop if needed.
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
    case T_CHAR  :
    case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stdx(from_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    case T_ADDRESS:
      __ stdx(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          // Encoding done in caller.
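          // Narrow oops are 32 bits, hence the word store.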
          __ stwx(from_reg->as_register(), base, disp);
        } else {
          __ stdx(from_reg->as_register(), base, disp);
        }
        __ verify_oop(from_reg->as_register()); // kills R0
        break;
      }
    case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset.
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ lbz(to_reg->as_register(), offset, base);
                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  : __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT : __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   : __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  : __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lwz(to_reg->as_register(), offset, base);
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld(to_reg->as_register(), offset, base);
          }
          __ verify_oop(to_reg->as_register());
          break;
        }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ lbzx(to_reg->as_register(), base, disp);
                   __ extsb(to_reg->as_register(), to_reg->as_register()); break;
    case T_CHAR  : __ lhzx(to_reg->as_register(), base, disp); break;
    case T_SHORT : __ lhax(to_reg->as_register(), base, disp); break;
    case T_INT   : __ lwax(to_reg->as_register(), base, disp); break;
    case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lwzx(to_reg->as_register(), base, disp);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ldx(to_reg->as_register(), base, disp);
        }
        __ verify_oop(to_reg->as_register());
        break;
      }
    case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
    case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(to_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    default      : ShouldNotReachHere();
  }
  return load_offset;
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c =
    src->as_constant_ptr();
  Register src_reg = R0;
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_ADDRESS: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_OBJECT: {
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      long value = c->as_jlong_bits(); // Must be long, not int: 64 significant bits.
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  int offset = -1;
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(base, info);
  }

  switch (c->type()) {
    case T_FLOAT: type = T_INT;
    case T_INT:
    case T_ADDRESS: {
      tmp = FrameMap::R0_opr;
      __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
      break;
    }
    case T_DOUBLE: type = T_LONG;
    case T_LONG: {
      tmp = FrameMap::R0_long_opr;
      __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
      break;
    }
    case T_OBJECT: {
      tmp = FrameMap::R0_opr;
      if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
        AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
        __ lis(R0, oop_addr.value() >> 16); // Don't care about sign extend (will use stw).
        __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
        __ ori(R0, R0, oop_addr.value() & 0xffff);
      } else {
        jobject2reg(c->as_jobject(), R0);
      }
      break;
    }
    default:
      Unimplemented();
  }

  // Handle either reg+reg or reg+disp address.
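  // A valid index register selects the indexed (reg+reg) store overload;
  // otherwise the displacement form is used and must fit in a simm16.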
  if (addr->index()->is_valid()) {
    assert(addr->disp() == 0, "must be zero");
    offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
  } else {
    assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
    offset = store(tmp, base, addr->disp(), type, wide, false);
  }

  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    if (!needs_explicit_null_check) {
      add_debug_info_for_null_check(offset, info);
    }
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
      break;
    }
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);  // Yes, as_jint ...
      break;
    }
    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), to_reg->as_register());
      } else {
        jobject2reg_with_patching(to_reg->as_register(), info);
      }
      break;
    }

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        if (to_reg->is_single_fpu()) {
          address const_addr = __ float_constant(c->as_jfloat());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfsx(to_reg->as_float_reg(), R0);
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
          __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
        }
      }
      break;

    case T_DOUBLE:
      {
        if (to_reg->is_double_fpu()) {
          address const_addr = __ double_constant(c->as_jdouble());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfdx(to_reg->as_double_reg(), R0);
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Unimplemented(); return Address();
}


inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
  if (addr->index()->is_illegal()) {
    return (RegisterOrConstant)(addr->disp());
  } else {
    return (RegisterOrConstant)(addr->index()->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const Register tmp = R0;
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Address from =
        frame_map()->address_for_slot(src->single_stack_ix());
      Address to = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lwz(tmp, from.disp(), from.base());
      __ stw(tmp, to.disp(), to.base());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Unimplemented(); return Address();
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Unimplemented(); return Address();
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  // null check for large offsets in LIRGenerator::do_LoadField
  bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm16(disp_value)) {
      if (needs_patching) {
        __ load_const32(R0, 0); // patchable int
      } else {
        __ load_const_optimized(R0, disp_value);
      }
      disp_reg = R0;
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
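  // The same offset is what add_debug_info_for_null_check below associates
  // with the implicit null check.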
  int offset;

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (type == T_ARRAY || type == T_OBJECT) && UseCompressedOops && !wide &&
                      Universe::narrow_oop_mode() != Universe::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
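  // R29 is the TOC register; when borrowed as displacement register it is
  // restored from MacroAssembler::global_toc() after the store (see below).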
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (load_disp) {
      disp_reg = use_R29 ? R29_TOC : R0;
      if (needs_patching) {
        __ load_const32(disp_reg, 0); // patchable int
      } else {
        __ load_const_optimized(disp_reg, disp_value);
      }
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (compress_oop) {
    Register co = __ encode_heap_oop(R0, from_reg->as_register());
    from_reg = FrameMap::as_opr(co);
  }

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (use_R29) {
    __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::return_op(LIR_Opr result) {
  const Register return_pc = R31;  // Must survive C-call to enable_stack_reserved_zone().
  const Register polling_page = R12;

  // Pop the stack before the safepoint code.
  int frame_size = initial_frame_size_in_bytes();
  if (Assembler::is_simm(frame_size, 16)) {
    __ addi(R1_SP, R1_SP, frame_size);
  } else {
    __ pop_frame();
  }

  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld(polling_page, in_bytes(Thread::polling_page_offset()), R16_thread);
  } else {
    __ load_const_optimized(polling_page, (long)(address) os::get_polling_page(), R0);
  }

  // Restore return pc relative to caller's sp.
  __ ld(return_pc, _abi(lr), R1_SP);
  // Move return pc to LR.
  __ mtlr(return_pc);

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(return_pc);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(polling_page);

  // Return.
  __ blr();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register();
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld(poll_addr, in_bytes(Thread::polling_page_offset()), R16_thread);
  } else {
    __ load_const_optimized(poll_addr, (intptr_t)os::get_polling_page(), R0);
  }
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(static_call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  const Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    __ mtctr(reg_scratch);
    __ bctr();
  }
  if (!success) {
    bailout("const section overflow");
    return;
  }

  assert(__ offset() - start <= static_call_stub_size(), "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
  if (opr1->is_single_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (unsigned_comp) {
              if (Assembler::is_uimm(con, 16)) {
                __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
              }
            } else {
              if (Assembler::is_simm(con, 16)) {
                __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
              }
            }
          }
          break;

        case T_OBJECT:
          // There are only equal/notequal comparisons on objects.
          {
            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
            } else {
              jobject2reg(con, R0);
              __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      assert(opr1->type() != T_ADDRESS && opr2->type() != T_ADDRESS, "currently unsupported");
      if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
        // There are only equal/notequal comparisons on objects.
        assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
        __ cmpd(BOOL_RESULT, opr1->as_register(), opr2->as_register());
      } else {
        if (unsigned_comp) {
          __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        } else {
          __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        }
      }
    }
  } else if (opr1->is_double_cpu()) {
    if (opr2->is_constant()) {
      jlong con = opr2->as_constant_ptr()->as_jlong();
      if (unsigned_comp) {
        if (Assembler::is_uimm(con, 16)) {
          __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      } else {
        if (Assembler::is_simm(con, 16)) {
          __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      }
    } else if (opr2->is_register()) {
      if (unsigned_comp) {
        __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      } else {
        __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      }
    } else {
      ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register Rdst = dst->as_register();
  Label done;
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
    } else if (left->is_double_fpu()) {
      __ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
    __ li(Rdst, is_unordered_less ?
               -1 : 1);
    __ bso(CCR0, done);
  } else if (code == lir_cmp_l2i) {
    __ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
  } else {
    ShouldNotReachHere();
  }
  __ mfcr(R0); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rdst, R0, 30);
  __ srawi(R0, R0, 31);
  __ orr(Rdst, R0, Rdst); // set result as follows: <: -1, =: 0, >: 1
  __ bind(done);
}


inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
  if (src->is_constant()) {
    lasm->const2reg(src, dst, lir_patch_none, NULL);
  } else if (src->is_register()) {
    lasm->reg2reg(src, dst);
  } else if (src->is_stack()) {
    lasm->stack2reg(src, dst, dst->type());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
    load_to_reg(this, opr1, result); // Condition doesn't matter.
    return;
  }

  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (condition) {
    case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
    case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
    case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
    case lir_cond_belowEqual:
    case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
    case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
    case lir_cond_aboveEqual:
    case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
    default:                    ShouldNotReachHere();
  }

  // Try to use isel on >=Power7.
  if (VM_Version::has_isel() && result->is_cpu_register()) {
    bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
    const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();

    // We can use result_reg to load one operand if not already in register.
    Register first  = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
             second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;

    if (first != second) {
      if (!o1_is_reg) {
        load_to_reg(this, opr1, result);
      }

      if (!o2_is_reg) {
        load_to_reg(this, opr2, result);
      }

      __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
      return;
    }
  } // isel

  load_to_reg(this, opr1, result);

  Label skip;
  int bo = positive ?
           Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(BOOL_RESULT, cond);
  __ bc(bo, bi, skip);

  load_to_reg(this, opr2, result);
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      if (right->is_single_fpu()) {
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
        switch (code) {
          case lir_add: __ fadds(res, lreg, rreg); break;
          case lir_sub: __ fsubs(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmuls(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdivs(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      } else {
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
        switch (code) {
          case lir_add: __ fadd(res, lreg, rreg); break;
          case lir_sub: __ fsub(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmul(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdiv(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      }

    } else if (dest->is_double_cpu()) {

      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
        case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
        case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert (right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add  (res, lreg, rreg); break;
        case lir_sub: __ sub  (res, lreg, rreg); break;
        case lir_mul: __ mullw(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert (right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int    simm16 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_sub:  assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
                       simm16 = -simm16;
        case lir_add:  if (res == lreg && simm16 == 0) break;
                       __ addi(res, lreg, simm16); break;
        case lir_mul:  if (res == lreg && simm16 == 1) break;
                       __ mulli(res, lreg, simm16); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm16(con), "must be simm16");

      switch (code) {
        case lir_sub:  assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                       con = -con;
        case lir_add:  if (res == lreg && con == 0) break;
                       __
      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
                      simm16 = -simm16;
        case lir_add: if (res == lreg && simm16 == 0) break;
                      __ addi(res, lreg, simm16); break;
        case lir_mul: if (res == lreg && simm16 == 1) break;
                      __ mulli(res, lreg, simm16); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm16(con), "must be simm16");

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                      con = -con;
        case lir_add: if (res == lreg && con == 0) break;
                      __ addi(res, lreg, (int)con); break;
        case lir_mul: if (res == lreg && con == 1) break;
                      __ mulli(res, lreg, (int)con); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


void LIR_Assembler::fpop() {
  Unimplemented();
  // do nothing
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_sqrt: {
      __ fsqrt(dest->as_double_reg(), value->as_double_reg());
      break;
    }
    case lir_abs: {
      __ fabs(dest->as_double_reg(), value->as_double_reg());
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  if (right->is_constant()) { // see do_LogicOp
    long uimm;
    Register d, l;
    if (dest->is_single_cpu()) {
      uimm = right->as_constant_ptr()->as_jint();
      d = dest->as_register();
      l = left->as_register();
    } else {
      uimm = right->as_constant_ptr()->as_jlong();
      d = dest->as_register_lo();
      l = left->as_register_lo();
    }
    long uimms  = (unsigned long)uimm >> 16,
         uimmss = (unsigned long)uimm >> 32;

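    // PPC logical-immediate instructions only take a 16-bit immediate, in
    // either the low halfword (andi_/ori/xori) or the next-higher one
    // (andis_/oris/xoris). uimms and uimmss hold the immediate shifted right
    // by 16 and 32 bits so the cases below can decide which form is
    // encodable; values needing more bits take the "special case" paths.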
    switch (code) {
      case lir_logic_and:
        if (uimmss != 0 || (uimms != 0 && (uimm & 0xFFFF) != 0) || is_power_of_2_long(uimm)) {
          __ andi(d, l, uimm); // special cases
        } else if (uimms != 0) { __ andis_(d, l, uimms); }
        else { __ andi_(d, l, uimm); }
        break;

      case lir_logic_or:
        if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ oris(d, l, uimms); }
        else { __ ori(d, l, uimm); }
        break;

      case lir_logic_xor:
        if (uimm == -1) { __ nand(d, l, l); } // special case
        else if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ xoris(d, l, uimms); }
        else { __ xori(d, l, uimm); }
        break;

      default: ShouldNotReachHere();
    }
  } else {
    assert(right->is_register(), "right should be in register");

    if (dest->is_single_cpu()) {
      switch (code) {
        case lir_logic_and: __ andr(dest->as_register(), left->as_register(), right->as_register()); break;
        case lir_logic_or:  __ orr (dest->as_register(), left->as_register(), right->as_register()); break;
        case lir_logic_xor: __ xorr(dest->as_register(), left->as_register(), right->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
                                                                        left->as_register_lo();
      Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
                                                                          right->as_register_lo();

      switch (code) {
        case lir_logic_and: __ andr(dest->as_register_lo(), l, r); break;
        case lir_logic_or:  __ orr (dest->as_register_lo(), l, r); break;
        case lir_logic_xor: __ xorr(dest->as_register_lo(), l, r); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


int LIR_Assembler::shift_amount(BasicType t) {
  int elem_size = type2aelembytes(t);
  switch (elem_size) {
    case 1 : return 0;
    case 2 : return 1;
    case 4 : return 2;
    case 8 : return 3;
  }
  ShouldNotReachHere();
  return -1;
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  info->add_register_oop(exceptionOop);

  // Reuse the debug info from the safepoint poll for the throw op itself.
  address pc_for_athrow = __ pc();
  int pc_for_athrow_offset = __ offset();
  //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  //__ relocate(rspec);
  //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0);
  __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id
                                                                   : Runtime1::handle_exception_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  // Note: Not used with EnableDebuggingOnDemand.
  assert(exceptionOop->as_register() == R3, "should match");
  __ b(_unwind_handler_entry);
}


void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp2 = R0;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // Set up the arraycopy stub information.
  ArrayCopyStub* stub = op->stub();
  const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame.

  // Always use the stub if no type information is available. It's OK if
  // the known type isn't loaded, since the code sanity-checks in debug
  // mode and the type isn't required when we know the exact type. Also
  // check that the type is an array type.
  if (op->expected_type() == NULL) {
    assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
           length->is_nonvolatile(), "must preserve");
    address copyfunc_addr = StubRoutines::generic_arraycopy();
    assert(copyfunc_addr != NULL, "generic arraycopy stub required");

    // 3 parms are int. Convert to long.
    __ mr(R3_ARG1, src);
    __ extsw(R4_ARG2, src_pos);
    __ mr(R5_ARG3, dst);
    __ extsw(R6_ARG4, dst_pos);
    __ extsw(R7_ARG5, length);

#ifndef PRODUCT
    if (PrintC1Statistics) {
      address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
      int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
      __ lwz(R11_scratch1, simm16_offs, tmp);
      __ addi(R11_scratch1, R11_scratch1, 1);
      __ stw(R11_scratch1, simm16_offs, tmp);
    }
#endif
    __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

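    // The generic stub returns 0 on success, or ~n if it bailed out after
    // copying n elements. nand(x, x) computes the bitwise complement, so tmp
    // ends up holding the number of elements already copied; length and the
    // positions are adjusted before taking the slow path for the remainder.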
    __ nand(tmp, R3_RET, R3_RET);
    __ subf(length, tmp, length);
    __ add(src_pos, tmp, src_pos);
    __ add(dst_pos, tmp, dst_pos);

    __ cmpwi(CCR0, R3_RET, 0);
    __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
  Label cont, slow, copyfunc;

  bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check |
                                        LIR_OpArrayCopy::dst_null_check |
                                        LIR_OpArrayCopy::src_pos_positive_check |
                                        LIR_OpArrayCopy::dst_pos_positive_check |
                                        LIR_OpArrayCopy::length_positive_check);

  // Use only one conditional branch for simple checks.
  if (simple_check_flag_set) {
    ConditionRegister combined_check = CCR1, tmp_check = CCR1;

    // Make sure src and dst are non-null.
    if (flags & LIR_OpArrayCopy::src_null_check) {
      __ cmpdi(combined_check, src, 0);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::dst_null_check) {
      __ cmpdi(tmp_check, dst, 0);
      if (tmp_check != combined_check) {
        __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
      }
      tmp_check = CCR0;
    }

    // Clear combined_check.eq if not already used.
    if (tmp_check == combined_check) {
      __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
      // Test src_pos register.
      __ cmpwi(tmp_check, src_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
      // Test dst_pos register.
      __ cmpwi(tmp_check, dst_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::length_positive_check) {
      // Make sure length isn't negative.
      __ cmpwi(tmp_check, length, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    __ beq(combined_check, slow);
  }

  // If the compiler was not able to prove that exact type of the source or the destination
  // of the arraycopy is an array type, check at runtime if the source or the destination is
  // an instance type.
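  // Klass::layout_helper is negative for arrays and non-negative for
  // instances, so comparing it against _lh_neutral_value and branching on
  // greater-or-equal rejects non-array operands.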
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }
  }

  // The higher 32 bits must be zero.
  __ extsw(length, length);

  __ extsw(src_pos, src_pos);
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
    __ add(tmp, length, src_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  __ extsw(dst_pos, dst_pos);
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
    __ add(tmp, length, dst_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  int shift = shift_amount(basic_type);

  if (!(flags & LIR_OpArrayCopy::type_check)) {
    __ b(cont);
  } else {
    // We don't know the array types are compatible.
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays.
      if (UseCompressedClassPointers) {
        // We don't need decode because we just need to compare.
        __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpw(CCR0, tmp, tmp2);
      } else {
        __ ld(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpd(CCR0, tmp, tmp2);
      }
      __ beq(CCR0, cont);
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
      assert_different_registers(tmp, tmp2, sub_klass, super_klass);

      __ load_klass(sub_klass, src);
      __ load_klass(super_klass, dst);

      __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
                                       &cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);

      address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      //__ load_const_optimized(tmp, slow_stc, tmp2);
      __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
      __ mtctr(tmp);
      __ bctrl(); // sets CR0
      __ beq(CCR0, cont);

      if (copyfunc_addr != NULL) { // Use stub if available.
        __ bind(copyfunc);
        // Src is not a sub class of dst so we have to do a
        // per-element check.
        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }

          __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ load_const_optimized(tmp, objArray_lh);
          __ cmpw(CCR0, tmp, tmp2);
          __ bne(CCR0, slow);
        }

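        // The checkcast stub expects raw element addresses and an element
        // count, plus the destination element klass and its
        // super_check_offset so it can subtype-check every element it copies.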
        Register src_ptr = R3_ARG1;
        Register dst_ptr = R4_ARG2;
        Register len     = R5_ARG3;
        Register chk_off = R6_ARG4;
        Register super_k = R7_ARG5;

        __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sldi(tmp, src_pos, shift);
          __ sldi(tmp2, dst_pos, shift);
          __ add(src_ptr, tmp, src_ptr);
          __ add(dst_ptr, tmp2, dst_ptr);
        }

        __ load_klass(tmp, dst);
        __ mr(len, length);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        __ ld(super_k, ek_offset, tmp);

        int sco_offset = in_bytes(Klass::super_check_offset_offset());
        __ lwz(chk_off, sco_offset, super_k);

        __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cmpwi(CCR0, R3_RET, 0);
          __ bne(CCR0, failed);
          address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
          __ bind(failed);
        }
#endif

        __ nand(tmp, R3_RET, R3_RET);
        __ cmpwi(CCR0, R3_RET, 0);
        __ beq(CCR0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
        }
#endif

        __ subf(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ add(dst_pos, tmp, dst_pos);
      }
    }
  }
  __ bind(slow);
  __ b(*stub->entry());
  __ bind(cont);

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    metadata2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      // Tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
      __ encode_klass_not_null(tmp);
      // Load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpw(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpw(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpw(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    } else {
      __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpd(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpd(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpd(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
    __ lwz(R11_scratch1, simm16_offs, tmp);
    __ addi(R11_scratch1, R11_scratch1, 1);
    __ stw(R11_scratch1, simm16_offs, tmp);
  }
#endif

  Register src_ptr = R3_ARG1;
  Register dst_ptr = R4_ARG2;
  Register len     = R5_ARG3;

  __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sldi(tmp, src_pos, shift);
    __ sldi(tmp2, dst_pos, shift);
    __ add(src_ptr, tmp, src_ptr);
    __ add(dst_ptr, tmp2, dst_ptr);
  }

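  // Pick the best stub variant for this copy: the flags tell us whether the
  // ranges may overlap and whether the elements are known to be aligned, and
  // select_arraycopy_function() returns the matching routine.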
  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // Arraycopy stubs take a length in number of elements, so don't scale it.
  __ mr(len, length);
  __ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0);

  __ bind(*stub->continuation());
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
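    // rldicl with shift 0 and mask-begin 64-5 keeps only the low 5 bits of
    // the count (64-6 keeps 6 bits in the long case below), implementing
    // Java's masking of shift distances: x << n really computes
    // x << (n & 31) for ints and x << (n & 63) for longs.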
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5);
#ifdef _LP64
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_shr:  __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
#endif
      switch (code) {
        case lir_shl:  __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_shr:  __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        default: ShouldNotReachHere();
      }
  } else {
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6);
    switch (code) {
      case lir_shl:  __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_shr:  __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      default: ShouldNotReachHere();
    }
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
  if (left->type() == T_OBJECT) {
    count = count & 63; // Shouldn't shift by more than sizeof(intptr_t).
    if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_shr:  __ sradi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
    return;
  }
#endif

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ slwi(dest->as_register(), left->as_register(), count); break;
        case lir_shr:  __ srawi(dest->as_register(), left->as_register(), count); break;
        case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_shr:  __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(op->klass()->as_register(), op->stub()->info());
    } else {
      add_debug_info_for_null_check_here(op->stub()->info());
    }
    __ lbz(op->tmp1()->as_register(),
           in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
    __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
    __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());

  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
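  // First pass: look for the receiver in the existing rows and, on a match,
  // bump that row's count and jump to update_done.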
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ verify_klass_ptr(tmp1);
    __ cmpd(CCR0, recv, tmp1);
    __ bne(CCR0, next_test);

    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in.
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ cmpdi(CCR0, tmp1, 0);
    __ bne(CCR0, next_test);
    __ li(tmp1, DataLayout::counter_increment);
    __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}


void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  const Register obj = op->object()->as_register(); // Needs to live in this register at safepoint (patching stub).
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  bool should_profile = op->should_profile();
  // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be same as one of the temps.
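  // If obj aliases one of the temps, retarget that temp to dst (which is
  // known to be distinct) and remember the conflict so obj can be kept
  // alive across the slow-path call below.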
  bool reg_conflict = false;
  if (obj == k_RInfo) {
    k_RInfo = dst;
    reg_conflict = true;
  } else if (obj == klass_RInfo) {
    klass_RInfo = dst;
    reg_conflict = true;
  } else if (obj == Rtmp1) {
    Rtmp1 = dst;
    reg_conflict = true;
  }
  assert_different_registers(obj, k_RInfo, klass_RInfo, Rtmp1);

  __ cmpdi(CCR0, obj, 0);

  ciMethodData* md = NULL;
  ciProfileData* data = NULL;
  int mdo_offset_bias = 0;
  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Register mdo = k_RInfo;
    Register data_val = Rtmp1;
    Label not_null;
    __ bne(CCR0, not_null);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ ori(data_val, data_val, BitData::null_seen_byte_constant());
    __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ beq(CCR0, *obj_is_null);
  }

  // get object class
  __ load_klass(klass_RInfo, obj);

  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }

  Label profile_cast_failure, failure_restore_obj, profile_cast_success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;
  Label *success_target = should_profile ? &profile_cast_success : success;

  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmpd(CCR0, k_RInfo, klass_RInfo);
    if (should_profile) {
      __ bne(CCR0, *failure_target);
      // Fall through to success case.
    } else {
      __ beq(CCR0, *success);
      // Fall through to failure case.
    }
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
        need_slow_path = false;
      }
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
                                       failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
    } else {
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
    }
    if (!need_slow_path) {
      if (!should_profile) { __ b(*success); }
    } else {
      // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
      address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      // Stub needs fixed registers (tmp1-3).
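      // The stub reads its sub/super klass arguments from the op's original
      // tmp1/tmp2 and clobbers tmp3, so move the (possibly retargeted) values
      // back; R0 and dst are used to keep obj alive across the call when it
      // is still needed afterwards.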
      Register original_k_RInfo = op->tmp1()->as_register();
      Register original_klass_RInfo = op->tmp2()->as_register();
      Register original_Rtmp1 = op->tmp3()->as_register();
      bool keep_obj_alive = reg_conflict && (op->code() == lir_checkcast);
      bool keep_klass_RInfo_alive = (obj == original_klass_RInfo) && should_profile;
      if (keep_obj_alive && (obj != original_Rtmp1)) { __ mr(R0, obj); }
      __ mr_if_needed(original_k_RInfo, k_RInfo);
      __ mr_if_needed(original_klass_RInfo, klass_RInfo);
      if (keep_obj_alive) { __ mr(dst, (obj == original_Rtmp1) ? obj : R0); }
      //__ load_const_optimized(original_Rtmp1, entry, R0);
      __ calculate_address_from_global_toc(original_Rtmp1, entry, true, true, false);
      __ mtctr(original_Rtmp1);
      __ bctrl(); // sets CR0
      if (keep_obj_alive) {
        if (keep_klass_RInfo_alive) { __ mr(R0, obj); }
        __ mr(obj, dst);
      }
      if (should_profile) {
        __ bne(CCR0, *failure_target);
        if (keep_klass_RInfo_alive) { __ mr(klass_RInfo, keep_obj_alive ? R0 : obj); }
        // Fall through to success case.
      } else {
        __ beq(CCR0, *success);
        // Fall through to failure case.
      }
    }
  }

  if (should_profile) {
    Register mdo = k_RInfo, recv = klass_RInfo;
    assert_different_registers(mdo, recv, Rtmp1);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success);
    __ b(*success);

    // Cast failure case.
    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
    __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }

  __ bind(*failure);
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();
    bool should_profile = op->should_profile();

    __ verify_oop(value);
    CodeStub* stub = op->stub();
    // Check if it needs to be profiled.
    ciMethodData* md = NULL;
    ciProfileData* data = NULL;
    int mdo_offset_bias = 0;
    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, failure, done;
    Label *success_target = should_profile ? &profile_cast_success : &done;
    __ cmpdi(CCR0, value, 0);
    if (should_profile) {
      Label not_null;
      __ bne(CCR0, not_null);
      Register mdo = k_RInfo;
      Register data_val = Rtmp1;
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ ori(data_val, data_val, BitData::null_seen_byte_constant());
      __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ b(done);
      __ bind(not_null);
    } else {
      __ beq(CCR0, done);
    }
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(array, op->info_for_exception());
    } else {
      add_debug_info_for_null_check_here(op->info_for_exception());
    }
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // Get instance klass.
    __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo);
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL);

    // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
    const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
    //__ load_const_optimized(R0, slow_path);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
    __ mtctr(R0);
    __ bctrl(); // sets CR0
    if (!should_profile) {
      __ beq(CCR0, done);
      __ bind(failure);
    } else {
      __ bne(CCR0, failure);
      // Fall through to the success case.

      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ load_klass(recv, value);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ b(done);

      // Cast failure case.
      __ bind(failure);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, -DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    }
    __ b(*stub->entry());
    __ bind(done);

  } else if (code == lir_checkcast) {
    Label success, failure;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success);
    __ b(*op->stub()->entry());
    __ align(32, 12);
    __ bind(success);
    __ mr_if_needed(op->result_opr()->as_register(), op->object()->as_register());
  } else if (code == lir_instanceof) {
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
    __ li(dst, 0);
    __ b(done);
    __ align(32, 12);
    __ bind(success);
    __ li(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr = op->addr()->as_pointer_register();
  Register cmp_value = noreg, new_value = noreg;
  bool is_64bit = false;

  if (op->code() == lir_cas_long) {
    cmp_value = op->cmp_value()->as_register_lo();
    new_value = op->new_value()->as_register_lo();
    is_64bit = true;
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    cmp_value = op->cmp_value()->as_register();
    new_value = op->new_value()->as_register();
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        Register t1 = op->tmp1()->as_register();
        Register t2 = op->tmp2()->as_register();
        cmp_value = __ encode_heap_oop(t1, cmp_value);
        new_value = __ encode_heap_oop(t2, new_value);
      } else {
        is_64bit = true;
      }
    }
  } else {
    Unimplemented();
  }

  if (is_64bit) {
    __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, NULL, /*check without ldarx first*/true);
  } else {
    __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, /*check without ldarx first*/true);
  }

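  // The cmpxchg above was issued with MemBarNone, so the trailing barrier is
  // emitted here. On CPUs that are not multi-copy atomic (where IRIW support
  // is required and volatile loads already carry a leading sync) an isync is
  // presumably sufficient; otherwise a full sync is used.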
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ isync();
  } else {
    __ sync();
  }
}


void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}

void LIR_Assembler::reset_FPU() {
  Unimplemented();
}


void LIR_Assembler::breakpoint() {
  __ illtrap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // Compute pointer to BasicLock.
  __ add_const_optimized(dst, reg, offset);
}


void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();

  // Obj may not be an oop.
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // Add debug info for NullPointerException only if one is possible.
      if (op->info() != NULL) {
        if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
          explicit_null_check(obj, op->info());
        } else {
          add_debug_info_for_null_check_here(op->info());
        }
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // always do slow locking
      // note: The slow locking code could be inlined here, however if we use
      //       slow locking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow locking code is the same in either case which simplifies
      //       debugging.
      __ b(*op->stub()->entry());
    }
  } else {
    assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // always do slow unlocking
      // note: The slow unlocking code could be inlined here, however if we use
      //       slow unlocking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow unlocking code is the same in either case which simplifies
      //       debugging.
      __ b(*op->stub()->entry());
    }
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types.
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
  }

  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes.
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot.

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo);

          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(32, 12); // Insert up to 3 nops to align with 32 byte boundary.
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  Unimplemented();
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
  // tmp must be unused
  assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert (left->is_double_cpu(), "Must be a long");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  // Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
  if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) ||
      dest == Runtime1::entry_for(Runtime1::new_multi_array_id   )) {
    //__ load_const_optimized(R0, dest);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
    __ mtctr(R0);
    __ bctrl();
    assert(info != NULL, "sanity");
    add_call_info_here(info);
    return;
  }

  __ call_c_with_frame_resize(dest, /*no resizing*/ 0);
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotReachHere(); // Not needed on _LP64.
}

void LIR_Assembler::membar() {
  __ fence();
}

void LIR_Assembler::membar_acquire() {
  __ acquire();
}

void LIR_Assembler::membar_release() {
  __ release();
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(Assembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() {
  __ membar(Assembler::LoadStore);
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::StoreLoad);
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  assert(patch_code == lir_patch_none, "Patch code not supported");
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform");
  if (addr->index()->is_illegal()) {
    __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp());
  } else {
    assert(addr->disp() == 0, "can't have both: index and disp");
    __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register());
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotReachHere();
}


#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif


void LIR_Assembler::peephole(LIR_List* lir) {
  // Optimize instruction pairs before emitting.
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 1; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);

    // 2 register-register-moves
    if (op->code() == lir_move) {
      LIR_Opr in2  = ((LIR_Op1*)op)->in_opr(),
              res2 = ((LIR_Op1*)op)->result_opr();
      if (in2->is_register() && res2->is_register()) {
        LIR_Op* prev = inst->at(i - 1);
        if (prev && prev->code() == lir_move) {
          LIR_Opr in1  = ((LIR_Op1*)prev)->in_opr(),
                  res1 = ((LIR_Op1*)prev)->result_opr();
          if (in1->is_same_register(res2) && in2->is_same_register(res1)) {
            inst->remove_at(i);
          }
        }
      }
    }

  }
  return;
}


void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  const LIR_Address *addr = src->as_address_ptr();
  assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!");
  const Register Rptr = addr->base()->as_pointer_register(),
                 Rtmp = tmp->as_register();
  Register Rco = noreg;
  if (UseCompressedOops && data->is_oop()) {
    Rco = __ encode_heap_oop(Rtmp, data->as_register());
  }

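  // Load-reserve/store-conditional retry loop: lwarx/ldarx reserves the
  // location, stwcx_/stdcx_ stores only if the reservation still holds;
  // a failed store clears CR0.eq and we branch back to retry.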
  Label Lretry;
  __ bind(Lretry);

  if (data->type() == T_INT) {
    const Register Rold = dest->as_register(),
                   Rsrc = data->as_register();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stwcx_(Rtmp, Rptr);
    } else {
      __ stwcx_(Rsrc, Rptr);
    }
  } else if (data->is_oop()) {
    assert(code == lir_xchg, "xadd for oops");
    const Register Rold = dest->as_register();
    if (UseCompressedOops) {
      assert_different_registers(Rptr, Rold, Rco);
      __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stwcx_(Rco, Rptr);
    } else {
      const Register Robj = data->as_register();
      assert_different_registers(Rptr, Rold, Robj);
      __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stdcx_(Robj, Rptr);
    }
  } else if (data->type() == T_LONG) {
    const Register Rold = dest->as_register_lo(),
                   Rsrc = data->as_register_lo();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stdcx_(Rtmp, Rptr);
    } else {
      __ stdcx_(Rsrc, Rptr);
    }
  } else {
    ShouldNotReachHere();
  }

  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    __ bne_predict_not_taken(CCR0, Lretry);
  } else {
    __ bne(                  CCR0, Lretry);
  }

  if (UseCompressedOops && data->is_oop()) {
    __ decode_heap_oop(dest->as_register());
  }
}


void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label Lupdate, Ldo_update, Ldone;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (do_null) {
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ cmpdi(CCR0, obj, 0);
      __ bne(CCR0, Lupdate);
      __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      __ ori(R0, R0, TypeEntries::null_seen);
      if (do_update) {
        __ b(Ldo_update);
      } else {
        __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      }
    } else {
      if (do_update) {
        __ cmpdi(CCR0, obj, 0);
        __ beq(CCR0, Ldone);
      }
    }
#ifdef ASSERT
  } else {
    __ cmpdi(CCR0, obj, 0);
    __ bne(CCR0, Lupdate);
    __ stop("unexpected null obj", 0x9652);
#endif
  }
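  // The profiled cell holds a klass pointer with the TypeEntries flag bits
  // (null_seen, type_unknown) folded into its low bits; the comparisons
  // below mask them out via type_klass_mask/type_mask before comparing
  // against the observed klass.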
  __ bind(Lupdate);
  if (do_update) {
    Label Lnext;
    const Register klass = R29_TOC; // kill and reload
    bool klass_reg_used = false;
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      klass_reg_used = true;
      __ load_klass(klass, obj);
      metadata2reg(exact_klass->constant_encoding(), R0);
      __ cmpd(CCR0, klass, R0);
      __ beq(CCR0, ok);
      __ stop("exact klass and actual klass differ", 0x8564);
      __ bind(ok);
    }
#endif

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        if (exact_klass != NULL) {
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
          metadata2reg(exact_klass->constant_encoding(), klass);
        } else {
          __ load_klass(klass, obj);
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // may kill obj
        }

        // Like InterpreterMacroAssembler::profile_obj_type
        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        //beq(CCR1, do_nothing);

        __ andi_(R0, klass, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        //bne(CCR0, do_nothing);
        __ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
        __ beq(CCR0, Lnext);

        if (TypeEntries::is_type_none(current_klass)) {
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
          __ beq(CCR0, Ldo_update); // First time here. Set profile type.
        }

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        __ bne(CCR0, Lnext);
      }

      // Different than before. Cannot keep accurate profile.
      __ ori(R0, tmp, TypeEntries::type_unknown);
    } else {
      // There's a single possible klass at this profile point
      assert(exact_klass != NULL, "should be");
      __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

      if (TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        metadata2reg(exact_klass->constant_encoding(), klass);

        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        __ beq(CCR1, Lnext);
#ifdef ASSERT
        {
          Label ok;
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ beq(CCR0, ok); // First time here.

          __ stop("unexpected profiling mismatch", 0x7865);
          __ bind(ok);
        }
#endif
        // First time here. Set profile type.
        __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown. Nothing to do anymore.
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        __ bne(CCR0, Lnext);

        // Different than before. Cannot keep accurate profile.
        __ ori(R0, tmp, TypeEntries::type_unknown);
      }
    }

    __ bind(Ldo_update);
    __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

    __ bind(Lnext);
    if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit
  }
  __ bind(Ldone);
}


void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
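  // res first serves as a scratch register holding the CRC table address;
  // the helper updates crc in place, so the result is copied out afterwards.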
  __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
  __ kernel_crc32_singleByteReg(crc, val, res, true);
  __ mr(res, crc);
}

#undef __