/*
 * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  Unimplemented(); return false; // Currently not used on this platform.
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::R3_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::R3_opr;
}


// This specifies the stack pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //  1. Create a new compiled activation.
  //  2. Initialize local variables in the compiled activation. The expression
  //     stack must be empty at the osr_bci; it is not initialized.
  //  3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();
  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  //__ load_const_optimized(R0, entry_point);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
  __ mtctr(R0);
  __ bctr();

  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");

  int offset = code_offset();
  bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
  const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;

  // Fetch the exception from TLS and clear out exception related thread state.
  __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ li(R0, 0);
  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception);
  if (preserve_exception) { __ mr(Rexception_save, Rexception); }

  // Perform needed unlocking.
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R4_opr);
    stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
    __ unlock_object(R5, R6, R4, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Unimplemented();
  }

  // Dispatch to the unwind logic.
  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
  //__ load_const_optimized(R0, unwind_stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
  if (preserve_exception) { __ mr(Rexception, Rexception_save); }
  __ mtctr(R0);
  __ bctr();

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ li(reg, 0);
  } else {
    AddressLiteral addrlit = __ constant_oop_address(o);
    __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation).
  __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  const bool is_int = result->is_single_cpu();
  Register Rdividend = is_int ? left->as_register() : left->as_register_lo();
  Register Rdivisor  = noreg;
  Register Rscratch  = temp->as_register();
  Register Rresult   = is_int ? result->as_register() : result->as_register_lo();
  long divisor = -1;

  if (right->is_register()) {
    Rdivisor = is_int ? right->as_register() : right->as_register_lo();
  } else {
    divisor = is_int ? right->as_constant_ptr()->as_jint()
                     : right->as_constant_ptr()->as_jlong();
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(code == lir_idiv || code == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg) {
    if (divisor == 1) { // stupid, but can happen
      if (code == lir_idiv) {
        __ mr_if_needed(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else if (is_power_of_2(divisor)) {
      // Convert division by a power of two into some shifts and logical operations.
      int log2 = log2_intptr(divisor);

      // Round towards 0.
      if (divisor == 2) {
        if (is_int) {
          __ srwi(Rscratch, Rdividend, 31);
        } else {
          __ srdi(Rscratch, Rdividend, 63);
        }
      } else {
        if (is_int) {
          __ srawi(Rscratch, Rdividend, 31);
        } else {
          __ sradi(Rscratch, Rdividend, 63);
        }
        __ clrldi(Rscratch, Rscratch, 64-log2);
      }
      __ add(Rscratch, Rdividend, Rscratch);

      if (code == lir_idiv) {
        if (is_int) {
          __ srawi(Rresult, Rscratch, log2);
        } else {
          __ sradi(Rresult, Rscratch, log2);
        }
      } else { // lir_irem
        __ clrrdi(Rscratch, Rscratch, log2);
        __ sub(Rresult, Rdividend, Rscratch);
      }

    } else if (divisor == -1) {
      if (code == lir_idiv) {
        __ neg(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else {
      __ load_const_optimized(Rscratch, divisor);
      if (code == lir_idiv) {
        if (is_int) {
          __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        } else {
          __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        }
      } else {
        assert(Rscratch != R0, "need both");
        if (is_int) {
          __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mullw(Rscratch, R0, Rscratch);
        } else {
          __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mulld(Rscratch, R0, Rscratch);
        }
        __ sub(Rresult, Rdividend, Rscratch);
      }

    }
    return;
  }

  Label regular, done;
  if (is_int) {
    __ cmpwi(CCR0, Rdivisor, -1);
  } else {
    __ cmpdi(CCR0, Rdivisor, -1);
  }
  __ bne(CCR0, regular);
  if (code == lir_idiv) {
    __ neg(Rresult, Rdividend);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    } else {
      __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    }
  } else { // lir_irem
    __ li(Rresult, 0);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mullw(Rscratch, Rscratch, Rdivisor);
    } else {
      __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mulld(Rscratch, Rscratch, Rdivisor);
    }
    __ sub(Rresult, Rdividend, Rscratch);
  }
  __ bind(done);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
                      op->result_opr(), op->info());
      break;
    case lir_fmad:
      __ fmadd(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg());
      break;
    case lir_fmaf:
      __ fmadds(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(),
                op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
#endif

  Label *L = op->label();
  if (op->cond() == lir_cond_always) {
    __ b(*L);
  } else {
    Label done;
    bool is_unordered = false;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      is_unordered = true;
    } else {
      assert(op->code() == lir_branch, "just checking");
    }

    bool positive = false;
    Assembler::Condition cond = Assembler::equal;
    switch (op->cond()) {
      case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
      case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
      case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
      case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
      default:                    ShouldNotReachHere();
    }
    int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
    int bi = Assembler::bi0(BOOL_RESULT, cond);
    if (is_unordered) {
      if (positive) {
        if (op->ublock() == op->block()) {
          __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
        }
      } else {
        if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
      }
    }
    __ bc_far_optimized(bo, bi, *L);
    __ bind(done);
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr src = op->in_opr(),
          dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_double_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2d) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      __ fcfid(rdst, rsrc);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_l2f: {
      bool src_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc;
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2f) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      if (VM_Version::has_fcfids()) {
        __ fcfids(rdst, rsrc);
      } else {
        assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NaN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register(), 0);
      }
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register(), rsrc);
      }
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      bool dst_in_memory = !VM_Version::has_mtfprd();
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NaN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register_lo(), 0);
      }
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register_lo(), rsrc);
      }
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // Do nothing since all instructions are word aligned on ppc.
}


bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be related to the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(rtype == relocInfo::opt_virtual_call_type || rtype == relocInfo::static_call_type, "unexpected rtype");

  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ calculate_address_from_global_toc(R2_TOC, __ method_toc());

  // Virtual call relocation will point to the ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
  // Call to the fixup routine. The fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere(); // ic_call is used instead.
}


void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
  __ null_check(addr, stub->entry());
  append_code_stub(stub);
}


// Attention: caller must encode oop if needed.
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset in R0.
    assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
    __ load_const_optimized(R0, offset);
    store_offset = store(from_reg, base, R0, type, wide);
  } else {
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
      case T_CHAR  :
      case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
      case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
      case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
      case T_ADDRESS:
      case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            // Encoding done in caller.
            __ stw(from_reg->as_register(), offset, base);
          } else {
            __ std(from_reg->as_register(), offset, base);
          }
          __ verify_oop(from_reg->as_register());
          break;
        }
      case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


// Attention: caller must encode oop if needed.
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
    case T_CHAR  :
    case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stdx(from_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    case T_ADDRESS:
      __ stdx(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          // Encoding done in caller.
          __ stwx(from_reg->as_register(), base, disp);
        } else {
          __ stdx(from_reg->as_register(), base, disp);
        }
        __ verify_oop(from_reg->as_register()); // kills R0
        break;
      }
    case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset in R0.
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ lbz(to_reg->as_register(), offset, base);
                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  : __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT : __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   : __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  : __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lwz(to_reg->as_register(), offset, base);
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld(to_reg->as_register(), offset, base);
          }
          __ verify_oop(to_reg->as_register());
          break;
        }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ lbzx(to_reg->as_register(), base, disp);
                   __ extsb(to_reg->as_register(), to_reg->as_register()); break;
    case T_CHAR  : __ lhzx(to_reg->as_register(), base, disp); break;
    case T_SHORT : __ lhax(to_reg->as_register(), base, disp); break;
    case T_INT   : __ lwax(to_reg->as_register(), base, disp); break;
    case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lwzx(to_reg->as_register(), base, disp);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ldx(to_reg->as_register(), base, disp);
        }
        __ verify_oop(to_reg->as_register());
        break;
      }
    case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
    case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(to_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    default      : ShouldNotReachHere();
  }
  return load_offset;
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  Register src_reg = R0;
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_ADDRESS: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_OBJECT: {
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      long value = c->as_jlong_bits(); // Use a long; an int would truncate the upper 32 bits.
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  int offset = -1;
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(base, info);
  }

  switch (c->type()) {
    case T_FLOAT: type = T_INT; // fall through
    case T_INT:
    case T_ADDRESS: {
      tmp = FrameMap::R0_opr;
      __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
      break;
    }
    case T_DOUBLE: type = T_LONG; // fall through
    case T_LONG: {
      tmp = FrameMap::R0_long_opr;
      __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
      break;
    }
    case T_OBJECT: {
      tmp = FrameMap::R0_opr;
      if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
        AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
        __ lis(R0, oop_addr.value() >> 16); // Don't care about sign extend (will use stw).
        __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
        __ ori(R0, R0, oop_addr.value() & 0xffff);
      } else {
        jobject2reg(c->as_jobject(), R0);
      }
      break;
    }
    default:
      Unimplemented();
  }

  // Handle either reg+reg or reg+disp address.
  if (addr->index()->is_valid()) {
    assert(addr->disp() == 0, "must be zero");
    offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
  } else {
    assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
    offset = store(tmp, base, addr->disp(), type, wide, false);
  }

  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    if (!needs_explicit_null_check) {
      add_debug_info_for_null_check(offset, info);
    }
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
      break;
    }
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0); // Yes, as_jint ...
      break;
    }
    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), to_reg->as_register());
      } else {
        jobject2reg_with_patching(to_reg->as_register(), info);
      }
      break;
    }

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        if (to_reg->is_single_fpu()) {
          address const_addr = __ float_constant(c->as_jfloat());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfsx(to_reg->as_float_reg(), R0);
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
          __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
        }
      }
      break;

    case T_DOUBLE:
      {
        if (to_reg->is_double_fpu()) {
          address const_addr = __ double_constant(c->as_jdouble());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfdx(to_reg->as_double_reg(), R0);
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Unimplemented(); return Address();
}


inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
  if (addr->index()->is_illegal()) {
    return (RegisterOrConstant)(addr->disp());
  } else {
    return (RegisterOrConstant)(addr->index()->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const Register tmp = R0;
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lwz(tmp, from.disp(), from.base());
      __ stw(tmp, to.disp(), to.base());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Unimplemented(); return Address();
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Unimplemented(); return Address();
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  // Null check for large offsets in LIRGenerator::do_LoadField.
  bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm16(disp_value)) {
      if (needs_patching) {
        __ load_const32(R0, 0); // patchable int
      } else {
        __ load_const_optimized(R0, disp_value);
      }
      disp_reg = R0;
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to long moves
      __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (type == T_ARRAY || type == T_OBJECT) && UseCompressedOops && !wide &&
                      Universe::narrow_oop_mode() != Universe::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
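  // A sketch of the sequence emitted below for the compressed-oop, large-displacement
  // case (an illustration, not emitted verbatim): the displacement is materialized in
  // R29_TOC because R0 is needed by encode_heap_oop, and R29_TOC is restored from the
  // global TOC once the store is done.
  //   explicit_null_check(base)         ; done up front, before R29_TOC is clobbered
  //   load_const(R29_TOC, disp)         ; displacement doesn't fit in a simm16
  //   encode_heap_oop(R0, oop)          ; compress the oop into R0
  //   stwx(R0, base, R29_TOC)           ; 32-bit store of the narrow oop
  //   load_const(R29_TOC, global_toc)   ; reinitialize the TOC register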
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (load_disp) {
      disp_reg = use_R29 ? R29_TOC : R0;
      if (needs_patching) {
        __ load_const32(disp_reg, 0); // patchable int
      } else {
        __ load_const_optimized(disp_reg, disp_value);
      }
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (compress_oop) {
    Register co = __ encode_heap_oop(R0, from_reg->as_register());
    from_reg = FrameMap::as_opr(co);
  }

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (use_R29) {
    __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::return_op(LIR_Opr result) {
  const Register return_pc = R31;  // Must survive C-call to enable_stack_reserved_zone().
  const Register polling_page = R12;

  // Pop the stack before the safepoint code.
  int frame_size = initial_frame_size_in_bytes();
  if (Assembler::is_simm(frame_size, 16)) {
    __ addi(R1_SP, R1_SP, frame_size);
  } else {
    __ pop_frame();
  }

  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld(polling_page, in_bytes(Thread::polling_page_offset()), R16_thread);
  } else {
    __ load_const_optimized(polling_page, (long)(address) os::get_polling_page(), R0);
  }

  // Restore the return pc relative to the caller's SP.
  __ ld(return_pc, _abi(lr), R1_SP);
  // Move the return pc to LR.
  __ mtlr(return_pc);

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(return_pc);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(polling_page);

  // Return.
  __ blr();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
  const Register poll_addr = tmp->as_register();
  if (SafepointMechanism::uses_thread_local_poll()) {
    __ ld(poll_addr, in_bytes(Thread::polling_page_offset()), R16_thread);
  } else {
    __ load_const_optimized(poll_addr, (intptr_t)os::get_polling_page(), R0);
  }
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(poll_addr);

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(static_call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  const Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    __ mtctr(reg_scratch);
    __ bctr();
  }
  if (!success) {
    bailout("const section overflow");
    return;
  }

  assert(__ offset() - start <= static_call_stub_size(), "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
  if (opr1->is_single_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (unsigned_comp) {
              if (Assembler::is_uimm(con, 16)) {
                __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
              }
            } else {
              if (Assembler::is_simm(con, 16)) {
                __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
              }
            }
          }
          break;

        case T_OBJECT:
          // There are only equal/notequal comparisons on objects.
          {
            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
            } else {
              jobject2reg(con, R0);
              __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
        LIR_Address *addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
        else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
        __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
      } else {
        if (unsigned_comp) {
          __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        } else {
          __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        }
      }
    }
  } else if (opr1->is_double_cpu()) {
    if (opr2->is_constant()) {
      jlong con = opr2->as_constant_ptr()->as_jlong();
      if (unsigned_comp) {
        if (Assembler::is_uimm(con, 16)) {
          __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      } else {
        if (Assembler::is_simm(con, 16)) {
          __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      }
    } else if (opr2->is_register()) {
      if (unsigned_comp) {
        __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      } else {
        __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
    LIR_Address * addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
    else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
    __ cmpdi(BOOL_RESULT, R0, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  const Register Rdst = dst->as_register();
  Label done;
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
    } else if (left->is_double_fpu()) {
      __ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
    __ li(Rdst, is_unordered_less ? -1 : 1);
    __ bso(CCR0, done);
  } else if (code == lir_cmp_l2i) {
    __ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
  } else {
    ShouldNotReachHere();
  }
  __ mfcr(R0); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rdst, R0, 30);
  __ srawi(R0, R0, 31);
  __ orr(Rdst, R0, Rdst); // set result as follows: <: -1, =: 0, >: 1
  __ bind(done);
}


inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
  if (src->is_constant()) {
    lasm->const2reg(src, dst, lir_patch_none, NULL);
  } else if (src->is_register()) {
    lasm->reg2reg(src, dst);
  } else if (src->is_stack()) {
    lasm->stack2reg(src, dst, dst->type());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
    load_to_reg(this, opr1, result); // Condition doesn't matter.
    return;
  }

  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (condition) {
    case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
    case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
    case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
    case lir_cond_belowEqual:
    case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
    case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
    case lir_cond_aboveEqual:
    case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
    default:                    ShouldNotReachHere();
  }

  // Try to use isel on >=Power7.
  if (VM_Version::has_isel() && result->is_cpu_register()) {
    bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
    const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();

    // We can use result_reg to load one operand if not already in register.
    Register first  = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
             second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;

    if (first != second) {
      if (!o1_is_reg) {
        load_to_reg(this, opr1, result);
      }

      if (!o2_is_reg) {
        load_to_reg(this, opr2, result);
      }

      __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
      return;
    }
  } // isel

  load_to_reg(this, opr1, result);

  Label skip;
  int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(BOOL_RESULT, cond);
  __ bc(bo, bi, skip);

  load_to_reg(this, opr2, result);
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      if (right->is_single_fpu()) {
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
        switch (code) {
          case lir_add: __ fadds(res, lreg, rreg); break;
          case lir_sub: __ fsubs(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmuls(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdivs(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      } else {
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
        switch (code) {
          case lir_add: __ fadd(res, lreg, rreg); break;
          case lir_sub: __ fsub(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmul(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdiv(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      }

    } else if (dest->is_double_cpu()) {

      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
        case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
        case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert (right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add  (res, lreg, rreg); break;
        case lir_sub: __ sub  (res, lreg, rreg); break;
        case lir_mul: __ mullw(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert (right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int    simm16 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
                      simm16 = -simm16;
        case lir_add: if (res == lreg && simm16 == 0) break;
                      __ addi(res, lreg, simm16); break;
        case lir_mul: if (res == lreg && simm16 == 1) break;
                      __ mulli(res, lreg, simm16); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm16(con), "must be simm16");

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                      con = -con;
        case lir_add: if (res == lreg && con == 0) break;
                      __ addi(res, lreg, (int)con); break;
      switch (code) {
        case lir_sub:  assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                       con = -con;
        case lir_add:  if (res == lreg && con == 0) break;
                       __ addi(res, lreg, (int)con); break;
        case lir_mul:  if (res == lreg && con == 1) break;
                       __ mulli(res, lreg, (int)con); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


void LIR_Assembler::fpop() {
  Unimplemented();
  // do nothing
}


void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
  switch (code) {
    case lir_sqrt: {
      __ fsqrt(dest->as_double_reg(), value->as_double_reg());
      break;
    }
    case lir_abs: {
      __ fabs(dest->as_double_reg(), value->as_double_reg());
      break;
    }
    default: {
      ShouldNotReachHere();
      break;
    }
  }
}


void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
  if (right->is_constant()) { // see do_LogicOp
    long uimm;
    Register d, l;
    if (dest->is_single_cpu()) {
      uimm = right->as_constant_ptr()->as_jint();
      d = dest->as_register();
      l = left->as_register();
    } else {
      uimm = right->as_constant_ptr()->as_jlong();
      d = dest->as_register_lo();
      l = left->as_register_lo();
    }
    long uimms  = (unsigned long)uimm >> 16,
         uimmss = (unsigned long)uimm >> 32;

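    // PPC logical-immediate instructions take a 16-bit unsigned immediate,
    // applied either to the low halfword (andi_/ori/xori) or shifted left
    // 16 bits (andis_/oris/xoris). uimms/uimmss record which halfwords of
    // the constant are populated: e.g. uimm = 0x00450000 has uimms = 0x45,
    // so an "or" emits oris(d, l, 0x45), while uimm = 0x0000ABCD emits
    // ori(d, l, 0xABCD). "and" masks that span halfwords go through the
    // andi() macro, which may synthesize the operation with rotate-and-mask
    // instructions instead.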
    switch (code) {
      case lir_logic_and:
        if (uimmss != 0 || (uimms != 0 && (uimm & 0xFFFF) != 0) || is_power_of_2_long(uimm)) {
          __ andi(d, l, uimm); // special cases
        } else if (uimms != 0) { __ andis_(d, l, uimms); }
        else { __ andi_(d, l, uimm); }
        break;

      case lir_logic_or:
        if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ oris(d, l, uimms); }
        else { __ ori(d, l, uimm); }
        break;

      case lir_logic_xor:
        if (uimm == -1) { __ nand(d, l, l); } // special case
        else if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ xoris(d, l, uimms); }
        else { __ xori(d, l, uimm); }
        break;

      default: ShouldNotReachHere();
    }
  } else {
    assert(right->is_register(), "right should be in register");

    if (dest->is_single_cpu()) {
      switch (code) {
        case lir_logic_and: __ andr(dest->as_register(), left->as_register(), right->as_register()); break;
        case lir_logic_or:  __ orr (dest->as_register(), left->as_register(), right->as_register()); break;
        case lir_logic_xor: __ xorr(dest->as_register(), left->as_register(), right->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
                                                                        left->as_register_lo();
      Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
                                                                          right->as_register_lo();

      switch (code) {
        case lir_logic_and: __ andr(dest->as_register_lo(), l, r); break;
        case lir_logic_or:  __ orr (dest->as_register_lo(), l, r); break;
        case lir_logic_xor: __ xorr(dest->as_register_lo(), l, r); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


int LIR_Assembler::shift_amount(BasicType t) {
  int elem_size = type2aelembytes(t);
  switch (elem_size) {
    case 1 : return 0;
    case 2 : return 1;
    case 4 : return 2;
    case 8 : return 3;
  }
  ShouldNotReachHere();
  return -1;
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  info->add_register_oop(exceptionOop);

  // Reuse the debug info from the safepoint poll for the throw op itself.
  address pc_for_athrow = __ pc();
  int pc_for_athrow_offset = __ offset();
  //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  //__ relocate(rspec);
  //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0);
  __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id
                                                                   : Runtime1::handle_exception_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  // Note: Not used with EnableDebuggingOnDemand.
  assert(exceptionOop->as_register() == R3, "should match");
  __ b(_unwind_handler_entry);
}


void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src     = op->src()->as_register();
  Register dst     = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length  = op->length()->as_register();
  Register tmp     = op->tmp()->as_register();
  Register tmp2    = R0;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // Set up the arraycopy stub information.
  ArrayCopyStub* stub = op->stub();
  const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame.

  // Always do the stub if no type information is available. It's ok if the
  // known type isn't loaded, since the code sanity-checks in debug mode and
  // the type isn't required when we know the exact type. Also check that
  // the type is an array type.
  if (op->expected_type() == NULL) {
    assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
           length->is_nonvolatile(), "must preserve");
    // 3 parms are int. Convert to long.
    __ mr(R3_ARG1, src);
    __ extsw(R4_ARG2, src_pos);
    __ mr(R5_ARG3, dst);
    __ extsw(R6_ARG4, dst_pos);
    __ extsw(R7_ARG5, length);
    address copyfunc_addr = StubRoutines::generic_arraycopy();

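    // Convention of the generic stub (when it exists): it returns 0 in
    // R3_RET on success, and the one's complement of the number of elements
    // already copied on partial failure. The nand below computes
    // tmp = ~R3_RET, i.e. that element count, so src_pos/dst_pos/length can
    // be advanced before branching to the slow path to copy the rest.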
    if (copyfunc_addr == NULL) { // Use C version if stub was not generated.
      address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
      __ call_c_with_frame_resize(entry, frame_resize);
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
        int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
        __ lwz(R11_scratch1, simm16_offs, tmp);
        __ addi(R11_scratch1, R11_scratch1, 1);
        __ stw(R11_scratch1, simm16_offs, tmp);
      }
#endif
      __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

      __ nand(tmp, R3_RET, R3_RET);
      __ subf(length, tmp, length);
      __ add(src_pos, tmp, src_pos);
      __ add(dst_pos, tmp, dst_pos);
    }

    __ cmpwi(CCR0, R3_RET, 0);
    __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
  Label cont, slow, copyfunc;

  bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check |
                                        LIR_OpArrayCopy::dst_null_check |
                                        LIR_OpArrayCopy::src_pos_positive_check |
                                        LIR_OpArrayCopy::dst_pos_positive_check |
                                        LIR_OpArrayCopy::length_positive_check);

  // Use only one conditional branch for simple checks.
  if (simple_check_flag_set) {
    ConditionRegister combined_check = CCR1, tmp_check = CCR1;

    // Make sure src and dst are non-null.
    if (flags & LIR_OpArrayCopy::src_null_check) {
      __ cmpdi(combined_check, src, 0);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::dst_null_check) {
      __ cmpdi(tmp_check, dst, 0);
      if (tmp_check != combined_check) {
        __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
      }
      tmp_check = CCR0;
    }

    // Clear combined_check.eq if not already used.
    if (tmp_check == combined_check) {
      __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
      // Test src_pos register.
      __ cmpwi(tmp_check, src_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
      // Test dst_pos register.
      __ cmpwi(tmp_check, dst_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::length_positive_check) {
      // Make sure length isn't negative.
      __ cmpwi(tmp_check, length, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    __ beq(combined_check, slow);
  }

  // If the compiler was not able to prove that the exact type of the source
  // or the destination of the arraycopy is an array type, check at runtime
  // whether the source or the destination is an instance type.
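  // Klass::layout_helper() encodes array-ness in its sign: instance klasses
  // have a non-negative layout helper (roughly, the instance size), while
  // array klasses use negative, tagged values. Comparing against
  // _lh_neutral_value and branching on >= therefore routes any non-array
  // (instance) operand to the slow path below.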
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }
  }

  // The higher 32 bits must be zero.
  __ extsw(length, length);

  __ extsw(src_pos, src_pos);
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
    __ add(tmp, length, src_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  __ extsw(dst_pos, dst_pos);
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
    __ add(tmp, length, dst_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  int shift = shift_amount(basic_type);

  if (!(flags & LIR_OpArrayCopy::type_check)) {
    __ b(cont);
  } else {
    // We don't know the array types are compatible.
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays.
      if (UseCompressedClassPointers) {
        // We don't need to decode because we just need to compare.
        __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpw(CCR0, tmp, tmp2);
      } else {
        __ ld(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpd(CCR0, tmp, tmp2);
      }
      __ beq(CCR0, cont);
    } else {
      // For object arrays, if src is a subclass of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
      assert_different_registers(tmp, tmp2, sub_klass, super_klass);

      __ load_klass(sub_klass, src);
      __ load_klass(super_klass, dst);

      __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
                                       &cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);

      address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      //__ load_const_optimized(tmp, slow_stc, tmp2);
      __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
      __ mtctr(tmp);
      __ bctrl(); // sets CR0
      __ beq(CCR0, cont);

      if (copyfunc_addr != NULL) { // Use stub if available.
        __ bind(copyfunc);
        // Src is not a subclass of dst so we have to do a
        // per-element check.
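        // At this point at least one of src/dst is known to be an object
        // array (asserted below). If only one of them is known, load the
        // other one's klass and compare its layout helper against the
        // objArray layout helper, so the per-element stub is only used when
        // both operands really are reference arrays.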
        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }

          __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ load_const_optimized(tmp, objArray_lh);
          __ cmpw(CCR0, tmp, tmp2);
          __ bne(CCR0, slow);
        }

        Register src_ptr = R3_ARG1;
        Register dst_ptr = R4_ARG2;
        Register len     = R5_ARG3;
        Register chk_off = R6_ARG4;
        Register super_k = R7_ARG5;

        __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sldi(tmp, src_pos, shift);
          __ sldi(tmp2, dst_pos, shift);
          __ add(src_ptr, tmp, src_ptr);
          __ add(dst_ptr, tmp2, dst_ptr);
        }

        __ load_klass(tmp, dst);
        __ mr(len, length);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        __ ld(super_k, ek_offset, tmp);

        int sco_offset = in_bytes(Klass::super_check_offset_offset());
        __ lwz(chk_off, sco_offset, super_k);

        __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cmpwi(CCR0, R3_RET, 0);
          __ bne(CCR0, failed);
          address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
          __ bind(failed);
        }
#endif

        __ nand(tmp, R3_RET, R3_RET);
        __ cmpwi(CCR0, R3_RET, 0);
        __ beq(CCR0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
        }
#endif

        __ subf(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ add(dst_pos, tmp, dst_pos);
      }
    }
  }
  __ bind(slow);
  __ b(*stub->entry());
  __ bind(cont);

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    metadata2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      // Tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
      __ encode_klass_not_null(tmp);
      // Load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpw(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpw(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpw(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    } else {
      __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpd(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpd(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpd(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
    __ lwz(R11_scratch1, simm16_offs, tmp);
    __ addi(R11_scratch1, R11_scratch1, 1);
    __ stw(R11_scratch1, simm16_offs, tmp);
  }
#endif

  Register src_ptr = R3_ARG1;
  Register dst_ptr = R4_ARG2;
  Register len     = R5_ARG3;

  __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sldi(tmp, src_pos, shift);
    __ sldi(tmp2, dst_pos, shift);
    __ add(src_ptr, tmp, src_ptr);
    __ add(dst_ptr, tmp2, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned  = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

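  // StubRoutines::select_arraycopy_function() picks the most specific stub
  // for this copy: it dispatches on the element type, on whether the ranges
  // are known not to overlap (disjoint vs. conjoint variants), and on
  // whether both addresses are known to be element-aligned. 'name' is an
  // out parameter used only for logging; the final 'false' means the
  // destination is not known to be uninitialized.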
  // Arraycopy stubs take a length in number of elements, so don't scale it.
  __ mr(len, length);
  __ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0);

  __ bind(*stub->continuation());
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5);
#ifdef _LP64
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_shr:  __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
#endif
      switch (code) {
        case lir_shl:  __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_shr:  __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break;
        default: ShouldNotReachHere();
      }
  } else {
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6);
    switch (code) {
      case lir_shl:  __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_shr:  __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      default: ShouldNotReachHere();
    }
  }
}

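// Java shift semantics (JLS 15.19) use only the low 5 bits of the count for
// int shifts and the low 6 bits for long shifts. The rldicl above (rotate by
// 0, clear all but the low 5 or 6 bits) implements exactly that masking,
// i.e. tmp = count & 0x1F or tmp = count & 0x3F, before the variable-shift
// instruction consumes it. The constant-count variant below applies the same
// masking at compile time.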

void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
  if (left->type() == T_OBJECT) {
    count = count & 63; // Shouldn't shift by more than sizeof(intptr_t).
    if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_shr:  __ sradi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
    return;
  }
#endif

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ slwi(dest->as_register(), left->as_register(), count); break;
        case lir_shr:  __ srawi(dest->as_register(), left->as_register(), count); break;
        case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_shr:  __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(op->klass()->as_register(), op->stub()->info());
    } else {
      add_debug_info_for_null_check_here(op->stub()->info());
    }
    __ lbz(op->tmp1()->as_register(),
           in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
    __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
    __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());

  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

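// type_profile_helper implements the receiver cache used by receiver-type
// profiling: a ReceiverTypeData record holds VirtualCallData::row_limit()
// (receiver klass, count) rows. The first loop below bumps the count of the
// row whose cached klass matches recv; if none matches, the second loop
// claims the first empty row for recv with an initial count of one
// increment. If all rows are taken by other types, control falls out to the
// caller, which typically bumps a total/polymorphic counter instead.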
void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ verify_klass_ptr(tmp1);
    __ cmpd(CCR0, recv, tmp1);
    __ bne(CCR0, next_test);

    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in.
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ cmpdi(CCR0, tmp1, 0);
    __ bne(CCR0, next_test);
    __ li(tmp1, DataLayout::counter_increment);
    __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}


void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  bool should_profile = op->should_profile();
  bool move_obj_to_dst = (op->code() == lir_checkcast);
  // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be same as one of the temps.
  bool reg_conflict = (obj == k_RInfo || obj == klass_RInfo || obj == Rtmp1);
  bool restore_obj = move_obj_to_dst && reg_conflict;

  __ cmpdi(CCR0, obj, 0);
  if (move_obj_to_dst || reg_conflict) {
    __ mr_if_needed(dst, obj);
    if (reg_conflict) { obj = dst; }
  }

  ciMethodData* md;
  ciProfileData* data;
  int mdo_offset_bias = 0;
  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Register mdo = k_RInfo;
    Register data_val = Rtmp1;
    Label not_null;
    __ bne(CCR0, not_null);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ ori(data_val, data_val, BitData::null_seen_byte_constant());
    __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ beq(CCR0, *obj_is_null);
  }

  // Get object class.
  __ load_klass(klass_RInfo, obj);

  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }

  Label profile_cast_failure, failure_restore_obj, profile_cast_success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;
  Label *success_target = should_profile ? &profile_cast_success : success;

  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmpd(CCR0, k_RInfo, klass_RInfo);
    if (should_profile) {
      __ bne(CCR0, *failure_target);
      // Fall through to success case.
    } else {
      __ beq(CCR0, *success);
      // Fall through to failure case.
    }
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
        need_slow_path = false;
      }
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
                                       failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
    } else {
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
    }
    if (!need_slow_path) {
      if (!should_profile) { __ b(*success); }
    } else {
      // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
      address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      //__ load_const_optimized(Rtmp1, entry, R0);
      __ calculate_address_from_global_toc(Rtmp1, entry, true, true, false);
      __ mtctr(Rtmp1);
      __ bctrl(); // sets CR0
      if (should_profile) {
        __ bne(CCR0, *failure_target);
        // Fall through to success case.
      } else {
        __ beq(CCR0, *success);
        // Fall through to failure case.
      }
    }
  }

  if (should_profile) {
    Register mdo = k_RInfo, recv = klass_RInfo;
    assert_different_registers(mdo, recv, Rtmp1);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success);
    __ b(*success);

    // Cast failure case.
    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
    __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }

  __ bind(*failure);

  if (restore_obj) {
    __ mr(op->object()->as_register(), dst);
    // Fall through to failure case.
  }
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();
    bool should_profile = op->should_profile();

    __ verify_oop(value);
    CodeStub* stub = op->stub();
    // Check if it needs to be profiled.
    ciMethodData* md;
    ciProfileData* data;
    int mdo_offset_bias = 0;
    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, failure, done;
    Label *success_target = should_profile ? &profile_cast_success : &done;

    __ cmpdi(CCR0, value, 0);
    if (should_profile) {
      Label not_null;
      __ bne(CCR0, not_null);
      Register mdo      = k_RInfo;
      Register data_val = Rtmp1;
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ ori(data_val, data_val, BitData::null_seen_byte_constant());
      __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ b(done);
      __ bind(not_null);
    } else {
      __ beq(CCR0, done);
    }
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(array, op->info_for_exception());
    } else {
      add_debug_info_for_null_check_here(op->info_for_exception());
    }
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

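    // An array store check asks: is value's klass a subtype of the element
    // klass of array's klass? The element klass is loaded from the
    // ObjArrayKlass below, then checked with the inline fast path
    // (primary/secondary supers); only on a miss do we call the
    // slow_subtype_check stub, which reports its verdict in CR0.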
    // Get instance klass.
    __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo);
    // Perform the fast part of the checking logic.
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL);

    // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
    const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
    //__ load_const_optimized(R0, slow_path);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
    __ mtctr(R0);
    __ bctrl(); // sets CR0
    if (!should_profile) {
      __ beq(CCR0, done);
      __ bind(failure);
    } else {
      __ bne(CCR0, failure);
      // Fall through to the success case.

      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ load_klass(recv, value);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ b(done);

      // Cast failure case.
      __ bind(failure);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, -DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    }
    __ b(*stub->entry());
    __ bind(done);

  } else if (code == lir_checkcast) {
    Label success, failure;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success); // Moves obj to dst.
    __ b(*op->stub()->entry());
    __ align(32, 12);
    __ bind(success);
  } else if (code == lir_instanceof) {
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
    __ li(dst, 0);
    __ b(done);
    __ align(32, 12);
    __ bind(success);
    __ li(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr = op->addr()->as_pointer_register();
  Register cmp_value = noreg, new_value = noreg;
  bool is_64bit = false;

  if (op->code() == lir_cas_long) {
    cmp_value = op->cmp_value()->as_register_lo();
    new_value = op->new_value()->as_register_lo();
    is_64bit = true;
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    cmp_value = op->cmp_value()->as_register();
    new_value = op->new_value()->as_register();
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        Register t1 = op->tmp1()->as_register();
        Register t2 = op->tmp2()->as_register();
        cmp_value = __ encode_heap_oop(t1, cmp_value);
        new_value = __ encode_heap_oop(t2, new_value);
      } else {
        is_64bit = true;
      }
    }
  } else {
    Unimplemented();
  }

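  // cmpxchgd/cmpxchgw emit a larx/stcx. retry loop and record success in a
  // bit of BOOL_RESULT (CCR5) for subsequent code (e.g. a cmove) to test.
  // MemBarNone is passed because the required ordering is added explicitly
  // below: an isync suffices when support_IRIW_for_not_multiple_copy_atomic_cpu
  // is set (a sync is then emitted elsewhere, before volatile loads), a full
  // sync otherwise.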
  if (is_64bit) {
    __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, NULL, /*check without ldarx first*/true);
  } else {
    __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, /*check without ldarx first*/true);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ isync();
  } else {
    __ sync();
  }
}


void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}

void LIR_Assembler::reset_FPU() {
  Unimplemented();
}


void LIR_Assembler::breakpoint() {
  __ illtrap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // Compute pointer to BasicLock.
  __ add_const_optimized(dst, reg, offset);
}


void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();

  // Obj may not be an oop.
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // Add debug info for NullPointerException only if one is possible.
      if (op->info() != NULL) {
        if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
          explicit_null_check(obj, op->info());
        } else {
          add_debug_info_for_null_check_here(op->info());
        }
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // Always do slow locking.
      // Note: The slow locking code could be inlined here, but if we use
      // slow locking, speed doesn't matter anyway and this solution is
      // simpler and requires less duplicated code. Additionally, the
      // slow locking code is the same in either case, which simplifies
      // debugging.
      __ b(*op->stub()->entry());
    }
  } else {
    assert(op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // Always do slow unlocking.
      // Note: The slow unlocking code could be inlined here, but if we use
      // slow unlocking, speed doesn't matter anyway and this solution is
      // simpler and requires less duplicated code. Additionally, the
      // slow unlocking code is the same in either case, which simplifies
      // debugging.
      __ b(*op->stub()->entry());
    }
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types.
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
  }

  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes.
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot.

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time.
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo);

          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);

      __ bind(update_done);
    }
  } else {
    // Static call.
    __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(32, 12); // Insert up to 3 nops to align with 32 byte boundary.
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  Unimplemented();
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "Must be a long");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  // Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
  if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) ||
      dest == Runtime1::entry_for(Runtime1::new_multi_array_id)) {
    //__ load_const_optimized(R0, dest);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
    __ mtctr(R0);
    __ bctrl();
    assert(info != NULL, "sanity");
    add_call_info_here(info);
    return;
  }

  __ call_c_with_frame_resize(dest, /*no resizing*/ 0);
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotReachHere(); // Not needed on _LP64.
}

void LIR_Assembler::membar() {
  __ fence();
}

void LIR_Assembler::membar_acquire() {
  __ acquire();
}

void LIR_Assembler::membar_release() {
  __ release();
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(Assembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() {
  __ membar(Assembler::LoadStore);
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::StoreLoad);
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform");
  if (addr->index()->is_illegal()) {
    __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp());
  } else {
    assert(addr->disp() == 0, "can't have both: index and disp");
    __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register());
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotReachHere();
}


#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif


void LIR_Assembler::peephole(LIR_List* lir) {
  // Optimize instruction pairs before emitting.
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 1; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);

    // Two register-register moves that cancel each other: if the second
    // move just copies back what the first move copied away, the second
    // move is redundant and can be removed.
    if (op->code() == lir_move) {
      LIR_Opr in2  = ((LIR_Op1*)op)->in_opr(),
              res2 = ((LIR_Op1*)op)->result_opr();
      if (in2->is_register() && res2->is_register()) {
        LIR_Op* prev = inst->at(i - 1);
        if (prev && prev->code() == lir_move) {
          LIR_Opr in1  = ((LIR_Op1*)prev)->in_opr(),
                  res1 = ((LIR_Op1*)prev)->result_opr();
          if (in1->is_same_register(res2) && in2->is_same_register(res1)) {
            inst->remove_at(i);
          }
        }
      }
    }

  }
  return;
}


void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  const Register Rptr = src->as_pointer_register(),
                 Rtmp = tmp->as_register();
  Register Rco = noreg;
  if (UseCompressedOops && data->is_oop()) {
    Rco = __ encode_heap_oop(Rtmp, data->as_register());
  }

  Label Lretry;
  __ bind(Lretry);

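  // Load-linked/store-conditional retry loop: l[wd]arx acquires a
  // reservation on the memory location, st[wd]cx. stores only if the
  // reservation is still held, and CR0.eq tells us whether it succeeded.
  // Any intervening store to the reserved granule makes the
  // store-conditional fail, in which case we branch back to Lretry.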
  if (data->type() == T_INT) {
    const Register Rold = dest->as_register(),
                   Rsrc = data->as_register();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stwcx_(Rtmp, Rptr);
    } else {
      __ stwcx_(Rsrc, Rptr);
    }
  } else if (data->is_oop()) {
    assert(code == lir_xchg, "xadd for oops");
    const Register Rold = dest->as_register();
    if (UseCompressedOops) {
      assert_different_registers(Rptr, Rold, Rco);
      __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stwcx_(Rco, Rptr);
    } else {
      const Register Robj = data->as_register();
      assert_different_registers(Rptr, Rold, Robj);
      __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stdcx_(Robj, Rptr);
    }
  } else if (data->type() == T_LONG) {
    const Register Rold = dest->as_register_lo(),
                   Rsrc = data->as_register_lo();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stdcx_(Rtmp, Rptr);
    } else {
      __ stdcx_(Rsrc, Rptr);
    }
  } else {
    ShouldNotReachHere();
  }

  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    __ bne_predict_not_taken(CCR0, Lretry);
  } else {
    __ bne(CCR0, Lretry);
  }

  if (UseCompressedOops && data->is_oop()) {
    __ decode_heap_oop(dest->as_register());
  }
}


void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label Lupdate, Ldo_update, Ldone;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (do_null) {
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ cmpdi(CCR0, obj, 0);
      __ bne(CCR0, Lupdate);
      __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      __ ori(R0, R0, TypeEntries::null_seen);
      if (do_update) {
        __ b(Ldo_update);
      } else {
        __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      }
    } else {
      if (do_update) {
        __ cmpdi(CCR0, obj, 0);
        __ beq(CCR0, Ldone);
      }
    }
#ifdef ASSERT
  } else {
    __ cmpdi(CCR0, obj, 0);
    __ bne(CCR0, Lupdate);
    __ stop("unexpected null obj", 0x9652);
#endif
  }

  __ bind(Lupdate);
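  // A type-profile cell is one machine word holding a Klass* with flag bits
  // folded into its low two bits: TypeEntries::null_seen records that a null
  // was observed, type_unknown marks a slot that has gone polymorphic, and
  // type_none (zero) means nothing has been recorded yet. clrrdi with
  // exact_log2(-TypeEntries::type_klass_mask) strips the flag bits so the
  // stored klass can be compared against the current one.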
  if (do_update) {
    Label Lnext;
    const Register klass = R29_TOC; // kill and reload
    bool klass_reg_used = false;
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      klass_reg_used = true;
      __ load_klass(klass, obj);
      metadata2reg(exact_klass->constant_encoding(), R0);
      __ cmpd(CCR0, klass, R0);
      __ beq(CCR0, ok);
      __ stop("exact klass and actual klass differ", 0x8564);
      __ bind(ok);
    }
#endif

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        if (exact_klass != NULL) {
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
          metadata2reg(exact_klass->constant_encoding(), klass);
        } else {
          __ load_klass(klass, obj);
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // may kill obj
        }

        // Like InterpreterMacroAssembler::profile_obj_type
        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        //beq(CCR1, do_nothing);

        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        //bne(CCR0, do_nothing);
        __ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
        __ beq(CCR0, Lnext);

        if (TypeEntries::is_type_none(current_klass)) {
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
          __ beq(CCR0, Ldo_update); // First time here. Set profile type.
        }

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        __ bne(CCR0, Lnext);
      }

      // Different than before. Cannot keep accurate profile.
      __ ori(R0, tmp, TypeEntries::type_unknown);
    } else {
      // There's a single possible klass at this profile point.
      assert(exact_klass != NULL, "should be");
      __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

      if (TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        metadata2reg(exact_klass->constant_encoding(), klass);

        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        __ beq(CCR1, Lnext);
#ifdef ASSERT
        {
          Label ok;
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ beq(CCR0, ok); // First time here.

          __ stop("unexpected profiling mismatch", 0x7865);
          __ bind(ok);
        }
#endif
        // First time here. Set profile type.
        __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown. Nothing to do anymore.
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        __ bne(CCR0, Lnext);

        // Different than before. Cannot keep accurate profile.
        __ ori(R0, tmp, TypeEntries::type_unknown);
      }
    }

    __ bind(Ldo_update);
    __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

    __ bind(Lnext);
    if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit
  }
  __ bind(Ldone);
}


void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
  __ kernel_crc32_singleByteReg(crc, val, res, true);
  __ mr(res, crc);
}

#undef __