/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  Unimplemented(); return false; // Currently not used on this platform.
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::R3_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::R3_opr;
}


// This specifies the stack pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //  1. Create a new compiled activation.
  //  2. Initialize local variables in the compiled activation. The expression
  //     stack must be empty at the osr_bci; it is not initialized.
  //  3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
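  // (Note: build_frame also bangs the stack with bang_size_in_bytes(), so a
  // potential stack overflow is provoked eagerly, before the new frame is in
  // place.)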
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, the nth lock slot in the osr buffer is the 0th lock
  // in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();
  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  //__ load_const_optimized(R0, entry_point);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
  __ mtctr(R0);
  __ bctr();

  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");

  int offset = code_offset();
  bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
  const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;

  // Fetch the exception from TLS and clear out exception related thread state.
  __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ li(R0, 0);
  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception);
  if (preserve_exception) { __ mr(Rexception_save, Rexception); }

  // Perform needed unlocking.
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R4_opr);
    stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
    __ unlock_object(R5, R6, R4, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Unimplemented();
  }

  // Dispatch to the unwind logic.
  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
  //__ load_const_optimized(R0, unwind_stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
  if (preserve_exception) { __ mr(Rexception, Rexception_save); }
  __ mtctr(R0);
  __ bctr();

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ li(reg, 0);
  } else {
    AddressLiteral addrlit = __ constant_oop_address(o);
    __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the table to hold the object once it's been patched.
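  // (The load emitted below materializes a dummy NULL oop constant; the
  // PatchingStub records this site so the runtime can later overwrite the
  // constant with the resolved oop.)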
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation)
  __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  const bool is_int = result->is_single_cpu();
  Register Rdividend = is_int ? left->as_register() : left->as_register_lo();
  Register Rdivisor  = noreg;
  Register Rscratch  = temp->as_register();
  Register Rresult   = is_int ? result->as_register() : result->as_register_lo();
  long divisor = -1;

  if (right->is_register()) {
    Rdivisor = is_int ? right->as_register() : right->as_register_lo();
  } else {
    divisor = is_int ? right->as_constant_ptr()->as_jint()
                     : right->as_constant_ptr()->as_jlong();
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(code == lir_idiv || code == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg) {
    if (divisor == 1) { // stupid, but can happen
      if (code == lir_idiv) {
        __ mr_if_needed(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else if (is_power_of_2(divisor)) {
      // Convert division by a power of two into some shifts and logical operations.
      int log2 = log2_intptr(divisor);

      // Round towards 0.
      if (divisor == 2) {
        if (is_int) {
          __ srwi(Rscratch, Rdividend, 31);
        } else {
          __ srdi(Rscratch, Rdividend, 63);
        }
      } else {
        if (is_int) {
          __ srawi(Rscratch, Rdividend, 31);
        } else {
          __ sradi(Rscratch, Rdividend, 63);
        }
        __ clrldi(Rscratch, Rscratch, 64-log2);
      }
      __ add(Rscratch, Rdividend, Rscratch);

      if (code == lir_idiv) {
        if (is_int) {
          __ srawi(Rresult, Rscratch, log2);
        } else {
          __ sradi(Rresult, Rscratch, log2);
        }
      } else { // lir_irem
        __ clrrdi(Rscratch, Rscratch, log2);
        __ sub(Rresult, Rdividend, Rscratch);
      }

    } else if (divisor == -1) {
      if (code == lir_idiv) {
        __ neg(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else {
      __ load_const_optimized(Rscratch, divisor);
      if (code == lir_idiv) {
        if (is_int) {
          __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        } else {
          __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        }
      } else {
        assert(Rscratch != R0, "need both");
        if (is_int) {
          __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mullw(Rscratch, R0, Rscratch);
        } else {
          __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mulld(Rscratch, R0, Rscratch);
        }
        __ sub(Rresult, Rdividend, Rscratch);
      }

    }
    return;
  }

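  // Divisor in a register: divw/divd yield an undefined result for the
  // overflow case min_int/-1 (min_long/-1), hence the explicit -1 handling
  // below (see the "Can't divide minint/-1" remarks above).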
  Label regular, done;
  if (is_int) {
    __ cmpwi(CCR0, Rdivisor, -1);
  } else {
    __ cmpdi(CCR0, Rdivisor, -1);
  }
  __ bne(CCR0, regular);
  if (code == lir_idiv) {
    __ neg(Rresult, Rdividend);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    } else {
      __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    }
  } else { // lir_irem
    __ li(Rresult, 0);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mullw(Rscratch, Rscratch, Rdivisor);
    } else {
      __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mulld(Rscratch, Rscratch, Rdivisor);
    }
    __ sub(Rresult, Rdividend, Rscratch);
  }
  __ bind(done);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
                      op->result_opr(), op->info());
      break;
    case lir_fmad:
      __ fmadd(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg());
      break;
    case lir_fmaf:
      __ fmadds(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(),
                op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
#endif

  Label *L = op->label();
  if (op->cond() == lir_cond_always) {
    __ b(*L);
  } else {
    Label done;
    bool is_unordered = false;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      is_unordered = true;
    } else {
      assert(op->code() == lir_branch, "just checking");
    }

    bool positive = false;
    Assembler::Condition cond = Assembler::equal;
    switch (op->cond()) {
      case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
      case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
      case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
      case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
      default:                    ShouldNotReachHere();
    }
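    // bo chooses branch-on-CR-bit-set vs. branch-on-CR-bit-clear (negated
    // conditions branch on the cleared bit); bi selects the CR bit
    // (less/greater/equal) within BOOL_RESULT.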
    int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
    int bi = Assembler::bi0(BOOL_RESULT, cond);
    if (is_unordered) {
      if (positive) {
        if (op->ublock() == op->block()) {
          __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
        }
      } else {
        if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
      }
    }
    __ bc_far_optimized(bo, bi, *L);
    __ bind(done);
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr src = op->in_opr(),
          dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      bool src_in_memory = src->is_stack();
      FloatRegister rdst = dst->as_double_reg();
      FloatRegister rsrc;
      assert(src_in_memory || VM_Version::has_mtfprd(), "without mtfprd, must access src via memory");
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2d) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      __ fcfid(rdst, rsrc);
      break;
    }
    case Bytecodes::_i2f:
    case Bytecodes::_l2f: {
      bool src_in_memory = src->is_stack();
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc;
      assert(src_in_memory || VM_Version::has_mtfprd(), "without mtfprd, must access src via memory");
      if (src_in_memory) {
        rsrc = src->as_double_reg(); // via mem
      } else {
        // move src to dst register
        if (code == Bytecodes::_i2f) {
          __ mtfprwa(rdst, src->as_register());
        } else {
          __ mtfprd(rdst, src->as_register_lo());
        }
        rsrc = rdst;
      }
      if (VM_Version::has_fcfids()) {
        __ fcfids(rdst, rsrc);
      } else {
        assert(code == Bytecodes::_i2f, "fcfid+frsp needs fixup code to avoid rounding incompatibility");
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      bool dst_in_memory = dst->is_stack();
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
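      // (fcmpu sets CCR0's summary-overflow/"unordered" bit iff rsrc is NaN;
      // the bso below then skips the conversion, keeping the preset 0.)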
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register(), 0);
      }
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      assert(dst_in_memory || VM_Version::has_mtfprd(), "without mffprd, must place result in memory");
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register(), rsrc);
      }
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      bool dst_in_memory = dst->is_stack();
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : NULL;
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      if (dst_in_memory) {
        __ li(R0, 0); // 0 in case of NAN
        __ std(R0, addr.disp(), addr.base());
      } else {
        __ li(dst->as_register_lo(), 0);
      }
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      assert(dst_in_memory || VM_Version::has_mtfprd(), "without mffprd, must place result in memory");
      if (dst_in_memory) {
        __ stfd(rsrc, addr.disp(), addr.base());
      } else {
        __ mffprd(dst->as_register_lo(), rsrc);
      }
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on ppc
}


bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be related to the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(rtype==relocInfo::opt_virtual_call_type || rtype==relocInfo::static_call_type, "unexpected rtype");

  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ calculate_address_from_global_toc(R2_TOC, __ method_toc());

  // Virtual call relocation will point to ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
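  // (Universe::non_oop_word() is a sentinel that never matches a real oop or
  // Klass*, so the first invocation through this site always misses the cache
  // and ends up in the IC fixup path.)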
  // Call to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere(); // ic_call is used instead.
}


void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
  __ null_check(addr, stub->entry());
  append_code_stub(stub);
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset.
    assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
    __ load_const_optimized(R0, offset);
    store_offset = store(from_reg, base, R0, type, wide);
  } else {
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
      case T_CHAR  :
      case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
      case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
      case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
      case T_ADDRESS:
      case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            // Encoding done in caller
            __ stw(from_reg->as_register(), offset, base);
          } else {
            __ std(from_reg->as_register(), offset, base);
          }
          __ verify_oop(from_reg->as_register());
          break;
        }
      case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
    case T_CHAR  :
    case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stdx(from_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    case T_ADDRESS:
      __ stdx(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          // Encoding done in caller.
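          // (The caller has already compressed the oop when UseCompressedOops
          // is on and the slot is narrow, so a 32-bit stwx suffices; the wide
          // case keeps the full 64-bit stdx.)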
          __ stwx(from_reg->as_register(), base, disp);
        } else {
          __ stdx(from_reg->as_register(), base, disp);
        }
        __ verify_oop(from_reg->as_register()); // kills R0
        break;
      }
    case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset.
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ lbz(to_reg->as_register(), offset, base);
                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  : __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT : __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   : __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  : __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lwz(to_reg->as_register(), offset, base);
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld(to_reg->as_register(), offset, base);
          }
          __ verify_oop(to_reg->as_register());
          break;
        }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ lbzx(to_reg->as_register(), base, disp);
                   __ extsb(to_reg->as_register(), to_reg->as_register()); break;
    case T_CHAR  : __ lhzx(to_reg->as_register(), base, disp); break;
    case T_SHORT : __ lhax(to_reg->as_register(), base, disp); break;
    case T_INT   : __ lwax(to_reg->as_register(), base, disp); break;
    case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lwzx(to_reg->as_register(), base, disp);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ldx(to_reg->as_register(), base, disp);
        }
        __ verify_oop(to_reg->as_register());
        break;
      }
    case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
    case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(to_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    default      : ShouldNotReachHere();
  }
  return load_offset;
}

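// Constants are materialized in R0, the assembler scratch register, and then
// stored into the destination stack slot.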
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  Register src_reg = R0;
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_ADDRESS: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_OBJECT: {
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      jlong value = c->as_jlong_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  int offset = -1;
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(base, info);
  }

  switch (c->type()) {
    case T_FLOAT: type = T_INT;
    case T_INT:
    case T_ADDRESS: {
      tmp = FrameMap::R0_opr;
      __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
      break;
    }
    case T_DOUBLE: type = T_LONG;
    case T_LONG: {
      tmp = FrameMap::R0_long_opr;
      __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
      break;
    }
    case T_OBJECT: {
      tmp = FrameMap::R0_opr;
      if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
        AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
        __ lis(R0, oop_addr.value() >> 16); // Don't care about sign extend (will use stw).
        __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
        __ ori(R0, R0, oop_addr.value() & 0xffff);
      } else {
        jobject2reg(c->as_jobject(), R0);
      }
      break;
    }
    default:
      Unimplemented();
  }

  // Handle either reg+reg or reg+disp address.
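  // (Indexed addresses go through the reg+reg store() overload above, e.g.
  // stwx; plain displacements use the reg+disp overload, e.g. stw, which only
  // reaches simm16 offsets, hence the assert below.)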
  if (addr->index()->is_valid()) {
    assert(addr->disp() == 0, "must be zero");
    offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
  } else {
    assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
    offset = store(tmp, base, addr->disp(), type, wide, false);
  }

  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    if (!needs_explicit_null_check) {
      add_debug_info_for_null_check(offset, info);
    }
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
      break;
    }
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0); // Yes, as_jint ...
      break;
    }
    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), to_reg->as_register());
      } else {
        jobject2reg_with_patching(to_reg->as_register(), info);
      }
      break;
    }

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        if (to_reg->is_single_fpu()) {
          address const_addr = __ float_constant(c->as_jfloat());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfsx(to_reg->as_float_reg(), R0);
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
          __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
        }
      }
      break;

    case T_DOUBLE:
      {
        if (to_reg->is_double_fpu()) {
          address const_addr = __ double_constant(c->as_jdouble());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfdx(to_reg->as_double_reg(), R0);
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Unimplemented(); return Address();
}


inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
  if (addr->index()->is_illegal()) {
    return (RegisterOrConstant)(addr->disp());
  } else {
    return (RegisterOrConstant)(addr->index()->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const Register tmp = R0;
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lwz(tmp, from.disp(), from.base());
      __ stw(tmp, to.disp(), to.base());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Unimplemented(); return Address();
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Unimplemented(); return Address();
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  // Null check for large offsets in LIRGenerator::do_LoadField.
  bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm16(disp_value)) {
      if (needs_patching) {
        __ load_const32(R0, 0); // patchable int
      } else {
        __ load_const_optimized(R0, disp_value);
      }
      disp_reg = R0;
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (type == T_ARRAY || type == T_OBJECT) && UseCompressedOops && !wide &&
                      Universe::narrow_oop_mode() != Universe::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
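  // (encode_heap_oop below uses R0 as its scratch register, so if the
  // displacement must be materialized as well, R29_TOC is borrowed for it and
  // the TOC is reloaded afterwards.)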
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (load_disp) {
      disp_reg = use_R29 ? R29_TOC : R0;
      if (needs_patching) {
        __ load_const32(disp_reg, 0); // patchable int
      } else {
        __ load_const_optimized(disp_reg, disp_value);
      }
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (compress_oop) {
    Register co = __ encode_heap_oop(R0, from_reg->as_register());
    from_reg = FrameMap::as_opr(co);
  }

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (use_R29) {
    __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::return_op(LIR_Opr result) {
  const Register return_pc = R31;  // Must survive C-call to enable_stack_reserved_zone().
  const Register polling_page = R12;

  // Pop the stack before the safepoint code.
  int frame_size = initial_frame_size_in_bytes();
  if (Assembler::is_simm(frame_size, 16)) {
    __ addi(R1_SP, R1_SP, frame_size);
  } else {
    __ pop_frame();
  }

  if (LoadPollAddressFromThread) {
    // TODO: PPC port __ ld(polling_page, in_bytes(JavaThread::poll_address_offset()), R16_thread);
    Unimplemented();
  } else {
    __ load_const_optimized(polling_page, (long)(address) os::get_polling_page(), R0); // TODO: PPC port: get_standard_polling_page()
  }

  // Restore the return pc relative to the caller's sp.
  __ ld(return_pc, _abi(lr), R1_SP);
  // Move return pc to LR.
  __ mtlr(return_pc);

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(return_pc);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(polling_page);
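  // (This read faults once the VM write-protects the polling page to bring
  // threads to a safepoint; the signal handler recognizes the site by the
  // poll_return relocation above.)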
  // Return.
  __ blr();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {

  if (LoadPollAddressFromThread) {
    const Register poll_addr = tmp->as_register();
    // TODO: PPC port __ ld(poll_addr, in_bytes(JavaThread::poll_address_offset()), R16_thread);
    Unimplemented();
    __ relocate(relocInfo::poll_type); // XXX
    guarantee(info != NULL, "Shouldn't be NULL");
    int offset = __ offset();
    add_debug_info_for_branch(info);
    __ load_from_polling_page(poll_addr);
    return offset;
  }

  __ load_const_optimized(tmp->as_register(), (intptr_t)os::get_polling_page(), R0); // TODO: PPC port: get_standard_polling_page()
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(tmp->as_register());

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(static_call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  const Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    __ mtctr(reg_scratch);
    __ bctr();
  }
  if (!success) {
    bailout("const section overflow");
    return;
  }

  assert(__ offset() - start <= static_call_stub_size(), "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
  if (opr1->is_single_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (unsigned_comp) {
              if (Assembler::is_uimm(con, 16)) {
                __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
              }
            } else {
              if (Assembler::is_simm(con, 16)) {
                __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
              }
            }
          }
          break;

        case T_OBJECT:
          // There are only equal/notequal comparisons on objects.
          {
            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
            } else {
              jobject2reg(con, R0);
              __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
        LIR_Address *addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
        else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
        __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
      } else {
        if (unsigned_comp) {
          __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        } else {
          __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        }
      }
    }
  } else if (opr1->is_double_cpu()) {
    if (opr2->is_constant()) {
      jlong con = opr2->as_constant_ptr()->as_jlong();
      if (unsigned_comp) {
        if (Assembler::is_uimm(con, 16)) {
          __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      } else {
        if (Assembler::is_simm(con, 16)) {
          __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      }
    } else if (opr2->is_register()) {
      if (unsigned_comp) {
        __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      } else {
        __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
    LIR_Address * addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
    else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
    __ cmpdi(BOOL_RESULT, R0, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
  const Register Rdst = dst->as_register();
  Label done;
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
    } else if (left->is_double_fpu()) {
      __ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
    __ li(Rdst, is_unordered_less ? -1 : 1);
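    // Preload the result used when the compare is unordered (NaN): -1 for the
    // "unordered is less" variant, +1 otherwise; bso below then exits early.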
    __ bso(CCR0, done);
  } else if (code == lir_cmp_l2i) {
    __ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
  } else {
    ShouldNotReachHere();
  }
  __ mfcr(R0); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rdst, R0, 30);
  __ srawi(R0, R0, 31);
  __ orr(Rdst, R0, Rdst); // set result as follows: <: -1, =: 0, >: 1
  __ bind(done);
}


inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
  if (src->is_constant()) {
    lasm->const2reg(src, dst, lir_patch_none, NULL);
  } else if (src->is_register()) {
    lasm->reg2reg(src, dst);
  } else if (src->is_stack()) {
    lasm->stack2reg(src, dst, dst->type());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
    load_to_reg(this, opr1, result); // Condition doesn't matter.
    return;
  }

  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (condition) {
    case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
    case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
    case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
    case lir_cond_belowEqual:
    case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
    case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
    case lir_cond_aboveEqual:
    case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
    default:                    ShouldNotReachHere();
  }

  // Try to use isel on >=Power7.
  if (VM_Version::has_isel() && result->is_cpu_register()) {
    bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
    const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();

    // We can use result_reg to load one operand if not already in register.
    Register first  = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
             second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;

    if (first != second) {
      if (!o1_is_reg) {
        load_to_reg(this, opr1, result);
      }

      if (!o2_is_reg) {
        load_to_reg(this, opr2, result);
      }

      __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
      return;
    }
  } // isel

  load_to_reg(this, opr1, result);

  Label skip;
  int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(BOOL_RESULT, cond);
  __ bc(bo, bi, skip);

  load_to_reg(this, opr2, result);
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      if (right->is_single_fpu()) {
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
        switch (code) {
          case lir_add: __ fadds(res, lreg, rreg); break;
          case lir_sub: __ fsubs(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmuls(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdivs(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      } else {
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
        switch (code) {
          case lir_add: __ fadd(res, lreg, rreg); break;
          case lir_sub: __ fsub(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmul(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdiv(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      }

    } else if (dest->is_double_cpu()) {

      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
        case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
        case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert(right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add  (res, lreg, rreg); break;
        case lir_sub: __ sub  (res, lreg, rreg); break;
        case lir_mul: __ mullw(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert(right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int simm16 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
                      simm16 = -simm16;
        case lir_add: if (res == lreg && simm16 == 0) break;
                      __ addi(res, lreg, simm16); break;
        case lir_mul: if (res == lreg && simm16 == 1) break;
                      __ mulli(res, lreg, simm16); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm16(con), "must be simm16");

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                      con = -con;
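        // Fallthrough into lir_add is intended: x - c is emitted as x + (-c).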
addi(res, lreg, (int)con); break; 1710 case lir_mul: if (res == lreg && con == 1) break; 1711 __ mulli(res, lreg, (int)con); break; 1712 default: ShouldNotReachHere(); 1713 } 1714 } 1715 } 1716 } 1717 1718 1719 void LIR_Assembler::fpop() { 1720 Unimplemented(); 1721 // do nothing 1722 } 1723 1724 1725 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1726 switch (code) { 1727 case lir_sqrt: { 1728 __ fsqrt(dest->as_double_reg(), value->as_double_reg()); 1729 break; 1730 } 1731 case lir_abs: { 1732 __ fabs(dest->as_double_reg(), value->as_double_reg()); 1733 break; 1734 } 1735 default: { 1736 ShouldNotReachHere(); 1737 break; 1738 } 1739 } 1740 } 1741 1742 1743 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1744 if (right->is_constant()) { // see do_LogicOp 1745 long uimm; 1746 Register d, l; 1747 if (dest->is_single_cpu()) { 1748 uimm = right->as_constant_ptr()->as_jint(); 1749 d = dest->as_register(); 1750 l = left->as_register(); 1751 } else { 1752 uimm = right->as_constant_ptr()->as_jlong(); 1753 d = dest->as_register_lo(); 1754 l = left->as_register_lo(); 1755 } 1756 long uimms = (unsigned long)uimm >> 16, 1757 uimmss = (unsigned long)uimm >> 32; 1758 1759 switch (code) { 1760 case lir_logic_and: 1761 if (uimmss != 0 || (uimms != 0 && (uimm & 0xFFFF) != 0) || is_power_of_2_long(uimm)) { 1762 __ andi(d, l, uimm); // special cases 1763 } else if (uimms != 0) { __ andis_(d, l, uimms); } 1764 else { __ andi_(d, l, uimm); } 1765 break; 1766 1767 case lir_logic_or: 1768 if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ oris(d, l, uimms); } 1769 else { __ ori(d, l, uimm); } 1770 break; 1771 1772 case lir_logic_xor: 1773 if (uimm == -1) { __ nand(d, l, l); } // special case 1774 else if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ xoris(d, l, uimms); } 1775 else { __ xori(d, l, uimm); } 1776 break; 1777 1778 default: ShouldNotReachHere(); 1779 } 1780 } else { 1781 assert(right->is_register(), "right should be in register"); 1782 1783 if (dest->is_single_cpu()) { 1784 switch (code) { 1785 case lir_logic_and: __ andr(dest->as_register(), left->as_register(), right->as_register()); break; 1786 case lir_logic_or: __ orr (dest->as_register(), left->as_register(), right->as_register()); break; 1787 case lir_logic_xor: __ xorr(dest->as_register(), left->as_register(), right->as_register()); break; 1788 default: ShouldNotReachHere(); 1789 } 1790 } else { 1791 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : 1792 left->as_register_lo(); 1793 Register r = (right->is_single_cpu() && right->is_oop_register()) ? 
right->as_register() : right->as_register_lo();

      switch (code) {
        case lir_logic_and: __ andr(dest->as_register_lo(), l, r); break;
        case lir_logic_or:  __ orr (dest->as_register_lo(), l, r); break;
        case lir_logic_xor: __ xorr(dest->as_register_lo(), l, r); break;
        default: ShouldNotReachHere();
      }
    }
  }
}


int LIR_Assembler::shift_amount(BasicType t) {
  int elem_size = type2aelembytes(t);
  switch (elem_size) {
    case 1 : return 0;
    case 2 : return 1;
    case 4 : return 2;
    case 8 : return 3;
  }
  ShouldNotReachHere();
  return -1;
}


void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
  info->add_register_oop(exceptionOop);

  // Reuse the debug info from the safepoint poll for the throw op itself.
  address pc_for_athrow = __ pc();
  int pc_for_athrow_offset = __ offset();
  //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
  //__ relocate(rspec);
  //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0);
  __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
  add_call_info(pc_for_athrow_offset, info); // for exception handler

  address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id
                                                                   : Runtime1::handle_exception_nofpu_id);
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();
}


void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
  // Note: Not used with EnableDebuggingOnDemand.
  assert(exceptionOop->as_register() == R3, "should match");
  __ b(_unwind_handler_entry);
}


void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp2 = R0;

  int flags = op->flags();
  ciArrayKlass* default_type = op->expected_type();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (basic_type == T_ARRAY) basic_type = T_OBJECT;

  // Set up the arraycopy stub information.
  ArrayCopyStub* stub = op->stub();
  const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame.

  // Always use the stub if no type information is available: it is OK if the
  // known type is not loaded, since the code sanity-checks in debug mode and
  // the type is not required when we know the exact type. Also check that
  // the type is an array type.
  if (op->expected_type() == NULL) {
    assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
           length->is_nonvolatile(), "must preserve");
    // 3 parms are int. Convert to long.
    __ mr(R3_ARG1, src);
    __ extsw(R4_ARG2, src_pos);
    __ mr(R5_ARG3, dst);
    __ extsw(R6_ARG4, dst_pos);
    __ extsw(R7_ARG5, length);
    address copyfunc_addr = StubRoutines::generic_arraycopy();

    if (copyfunc_addr == NULL) { // Use C version if stub was not generated.
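      // Note: Runtime1::arraycopy is a plain C entry point, hence the call
      // below resizes the frame by frame_resize (computed above) to a full
      // C ABI frame. The assembler stubs used elsewhere in this function
      // pass a resize of 0 because they need no C ABI save area.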
1882 address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy); 1883 __ call_c_with_frame_resize(entry, frame_resize); 1884 } else { 1885 #ifndef PRODUCT 1886 if (PrintC1Statistics) { 1887 address counter = (address)&Runtime1::_generic_arraycopystub_cnt; 1888 int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true); 1889 __ lwz(R11_scratch1, simm16_offs, tmp); 1890 __ addi(R11_scratch1, R11_scratch1, 1); 1891 __ stw(R11_scratch1, simm16_offs, tmp); 1892 } 1893 #endif 1894 __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0); 1895 1896 __ nand(tmp, R3_RET, R3_RET); 1897 __ subf(length, tmp, length); 1898 __ add(src_pos, tmp, src_pos); 1899 __ add(dst_pos, tmp, dst_pos); 1900 } 1901 1902 __ cmpwi(CCR0, R3_RET, 0); 1903 __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry()); 1904 __ bind(*stub->continuation()); 1905 return; 1906 } 1907 1908 assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point"); 1909 Label cont, slow, copyfunc; 1910 1911 bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check | 1912 LIR_OpArrayCopy::dst_null_check | 1913 LIR_OpArrayCopy::src_pos_positive_check | 1914 LIR_OpArrayCopy::dst_pos_positive_check | 1915 LIR_OpArrayCopy::length_positive_check); 1916 1917 // Use only one conditional branch for simple checks. 1918 if (simple_check_flag_set) { 1919 ConditionRegister combined_check = CCR1, tmp_check = CCR1; 1920 1921 // Make sure src and dst are non-null. 1922 if (flags & LIR_OpArrayCopy::src_null_check) { 1923 __ cmpdi(combined_check, src, 0); 1924 tmp_check = CCR0; 1925 } 1926 1927 if (flags & LIR_OpArrayCopy::dst_null_check) { 1928 __ cmpdi(tmp_check, dst, 0); 1929 if (tmp_check != combined_check) { 1930 __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal); 1931 } 1932 tmp_check = CCR0; 1933 } 1934 1935 // Clear combined_check.eq if not already used. 1936 if (tmp_check == combined_check) { 1937 __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal); 1938 tmp_check = CCR0; 1939 } 1940 1941 if (flags & LIR_OpArrayCopy::src_pos_positive_check) { 1942 // Test src_pos register. 1943 __ cmpwi(tmp_check, src_pos, 0); 1944 __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less); 1945 } 1946 1947 if (flags & LIR_OpArrayCopy::dst_pos_positive_check) { 1948 // Test dst_pos register. 1949 __ cmpwi(tmp_check, dst_pos, 0); 1950 __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less); 1951 } 1952 1953 if (flags & LIR_OpArrayCopy::length_positive_check) { 1954 // Make sure length isn't negative. 1955 __ cmpwi(tmp_check, length, 0); 1956 __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less); 1957 } 1958 1959 __ beq(combined_check, slow); 1960 } 1961 1962 // If the compiler was not able to prove that exact type of the source or the destination 1963 // of the arraycopy is an array type, check at runtime if the source or the destination is 1964 // an instance type. 
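  // A sketch of the test emitted below, based on the layout helper encoding
  // (see klass.hpp): array klasses store a negative layout_helper and
  // instance klasses a value >= Klass::_lh_neutral_value, so one signed
  // compare rejects non-arrays:
  //   lwz   tmp2, Klass::layout_helper_offset(klass)
  //   cmpwi CCR0, tmp2, Klass::_lh_neutral_value
  //   bge   CCR0, slow   // not an array -> take the slow path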
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }
  }

  // Higher 32bits must be null.
  __ extsw(length, length);

  __ extsw(src_pos, src_pos);
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
    __ add(tmp, length, src_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  __ extsw(dst_pos, dst_pos);
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
    __ add(tmp, length, dst_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  int shift = shift_amount(basic_type);

  if (!(flags & LIR_OpArrayCopy::type_check)) {
    __ b(cont);
  } else {
    // We don't know the array types are compatible.
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays.
      if (UseCompressedClassPointers) {
        // We don't need decode because we just need to compare.
        __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpw(CCR0, tmp, tmp2);
      } else {
        __ ld(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpd(CCR0, tmp, tmp2);
      }
      __ beq(CCR0, cont);
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
      assert_different_registers(tmp, tmp2, sub_klass, super_klass);

      __ load_klass(sub_klass, src);
      __ load_klass(super_klass, dst);

      __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
                                       &cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);

      address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      //__ load_const_optimized(tmp, slow_stc, tmp2);
      __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
      __ mtctr(tmp);
      __ bctrl(); // sets CR0
      __ beq(CCR0, cont);

      if (copyfunc_addr != NULL) { // Use stub if available.
        __ bind(copyfunc);
        // Src is not a sub class of dst so we have to do a
        // per-element check.
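        // The code below relies on at least one side having been proven an
        // object array at compile time (the assert on the mask). If, say,
        // only dst_objarray is set, the klass of src is loaded and its
        // layout_helper compared against array_layout_helper(T_OBJECT) to
        // establish that src is an objArray too before the per-element stub runs.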
2044 int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray; 2045 if ((flags & mask) != mask) { 2046 assert(flags & mask, "one of the two should be known to be an object array"); 2047 2048 if (!(flags & LIR_OpArrayCopy::src_objarray)) { 2049 __ load_klass(tmp, src); 2050 } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) { 2051 __ load_klass(tmp, dst); 2052 } 2053 2054 __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp); 2055 2056 jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 2057 __ load_const_optimized(tmp, objArray_lh); 2058 __ cmpw(CCR0, tmp, tmp2); 2059 __ bne(CCR0, slow); 2060 } 2061 2062 Register src_ptr = R3_ARG1; 2063 Register dst_ptr = R4_ARG2; 2064 Register len = R5_ARG3; 2065 Register chk_off = R6_ARG4; 2066 Register super_k = R7_ARG5; 2067 2068 __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type)); 2069 __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type)); 2070 if (shift == 0) { 2071 __ add(src_ptr, src_pos, src_ptr); 2072 __ add(dst_ptr, dst_pos, dst_ptr); 2073 } else { 2074 __ sldi(tmp, src_pos, shift); 2075 __ sldi(tmp2, dst_pos, shift); 2076 __ add(src_ptr, tmp, src_ptr); 2077 __ add(dst_ptr, tmp2, dst_ptr); 2078 } 2079 2080 __ load_klass(tmp, dst); 2081 __ mr(len, length); 2082 2083 int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset()); 2084 __ ld(super_k, ek_offset, tmp); 2085 2086 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 2087 __ lwz(chk_off, sco_offset, super_k); 2088 2089 __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0); 2090 2091 #ifndef PRODUCT 2092 if (PrintC1Statistics) { 2093 Label failed; 2094 __ cmpwi(CCR0, R3_RET, 0); 2095 __ bne(CCR0, failed); 2096 address counter = (address)&Runtime1::_arraycopy_checkcast_cnt; 2097 int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true); 2098 __ lwz(R11_scratch1, simm16_offs, tmp); 2099 __ addi(R11_scratch1, R11_scratch1, 1); 2100 __ stw(R11_scratch1, simm16_offs, tmp); 2101 __ bind(failed); 2102 } 2103 #endif 2104 2105 __ nand(tmp, R3_RET, R3_RET); 2106 __ cmpwi(CCR0, R3_RET, 0); 2107 __ beq(CCR0, *stub->continuation()); 2108 2109 #ifndef PRODUCT 2110 if (PrintC1Statistics) { 2111 address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt; 2112 int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true); 2113 __ lwz(R11_scratch1, simm16_offs, tmp); 2114 __ addi(R11_scratch1, R11_scratch1, 1); 2115 __ stw(R11_scratch1, simm16_offs, tmp); 2116 } 2117 #endif 2118 2119 __ subf(length, tmp, length); 2120 __ add(src_pos, tmp, src_pos); 2121 __ add(dst_pos, tmp, dst_pos); 2122 } 2123 } 2124 } 2125 __ bind(slow); 2126 __ b(*stub->entry()); 2127 __ bind(cont); 2128 2129 #ifdef ASSERT 2130 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) { 2131 // Sanity check the known type with the incoming class. For the 2132 // primitive case the types must match exactly with src.klass and 2133 // dst.klass each exactly matching the default type. For the 2134 // object array case, if no type check is needed then either the 2135 // dst type is exactly the expected type and the src type is a 2136 // subtype which we can't check or src is the same array as dst 2137 // but not necessarily exactly of type default_type. 2138 Label known_ok, halt; 2139 metadata2reg(op->expected_type()->constant_encoding(), tmp); 2140 if (UseCompressedClassPointers) { 2141 // Tmp holds the default type. It currently comes uncompressed after the 2142 // load of a constant, so encode it. 
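      // (Both sides of the compares below are then narrow klass words:
      // encoding the constant once here is cheaper than decoding every
      // klass loaded from an object header.)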
      __ encode_klass_not_null(tmp);
      // Load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpw(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpw(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpw(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    } else {
      __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpd(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpd(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpd(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
    __ lwz(R11_scratch1, simm16_offs, tmp);
    __ addi(R11_scratch1, R11_scratch1, 1);
    __ stw(R11_scratch1, simm16_offs, tmp);
  }
#endif

  Register src_ptr = R3_ARG1;
  Register dst_ptr = R4_ARG2;
  Register len = R5_ARG3;

  __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sldi(tmp, src_pos, shift);
    __ sldi(tmp2, dst_pos, shift);
    __ add(src_ptr, tmp, src_ptr);
    __ add(dst_ptr, tmp2, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // Arraycopy stubs take a length in number of elements, so don't scale it.
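  // For illustration: select_arraycopy_function picks the stub matching
  // (basic_type, aligned, disjoint). A provably non-overlapping int[] copy
  // resolves to a jint disjoint stub, while a possibly overlapping one gets
  // the conjoint variant, which copies backwards when the ranges overlap.
  // (The concrete stub names are internal to StubRoutines.)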
2212 __ mr(len, length); 2213 __ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0); 2214 2215 __ bind(*stub->continuation()); 2216 } 2217 2218 2219 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) { 2220 if (dest->is_single_cpu()) { 2221 __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5); 2222 #ifdef _LP64 2223 if (left->type() == T_OBJECT) { 2224 switch (code) { 2225 case lir_shl: __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break; 2226 case lir_shr: __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break; 2227 case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break; 2228 default: ShouldNotReachHere(); 2229 } 2230 } else 2231 #endif 2232 switch (code) { 2233 case lir_shl: __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break; 2234 case lir_shr: __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break; 2235 case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break; 2236 default: ShouldNotReachHere(); 2237 } 2238 } else { 2239 __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6); 2240 switch (code) { 2241 case lir_shl: __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break; 2242 case lir_shr: __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break; 2243 case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break; 2244 default: ShouldNotReachHere(); 2245 } 2246 } 2247 } 2248 2249 2250 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) { 2251 #ifdef _LP64 2252 if (left->type() == T_OBJECT) { 2253 count = count & 63; // Shouldn't shift by more than sizeof(intptr_t). 
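    // (This mirrors the Java shift semantics, JLS 15.19: long shifts use
    // only the low six bits of the count, int shifts the low five; e.g.
    // (1L << 64) == 1L because 64 & 63 == 0.)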
2254 if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); } 2255 else { 2256 switch (code) { 2257 case lir_shl: __ sldi(dest->as_register_lo(), left->as_register(), count); break; 2258 case lir_shr: __ sradi(dest->as_register_lo(), left->as_register(), count); break; 2259 case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break; 2260 default: ShouldNotReachHere(); 2261 } 2262 } 2263 return; 2264 } 2265 #endif 2266 2267 if (dest->is_single_cpu()) { 2268 count = count & 0x1F; // Java spec 2269 if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); } 2270 else { 2271 switch (code) { 2272 case lir_shl: __ slwi(dest->as_register(), left->as_register(), count); break; 2273 case lir_shr: __ srawi(dest->as_register(), left->as_register(), count); break; 2274 case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break; 2275 default: ShouldNotReachHere(); 2276 } 2277 } 2278 } else if (dest->is_double_cpu()) { 2279 count = count & 63; // Java spec 2280 if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); } 2281 else { 2282 switch (code) { 2283 case lir_shl: __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break; 2284 case lir_shr: __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break; 2285 case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break; 2286 default: ShouldNotReachHere(); 2287 } 2288 } 2289 } else { 2290 ShouldNotReachHere(); 2291 } 2292 } 2293 2294 2295 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) { 2296 if (op->init_check()) { 2297 if (!os::zero_page_read_protected() || !ImplicitNullChecks) { 2298 explicit_null_check(op->klass()->as_register(), op->stub()->info()); 2299 } else { 2300 add_debug_info_for_null_check_here(op->stub()->info()); 2301 } 2302 __ lbz(op->tmp1()->as_register(), 2303 in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register()); 2304 __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized); 2305 __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry()); 2306 } 2307 __ allocate_object(op->obj()->as_register(), 2308 op->tmp1()->as_register(), 2309 op->tmp2()->as_register(), 2310 op->tmp3()->as_register(), 2311 op->header_size(), 2312 op->object_size(), 2313 op->klass()->as_register(), 2314 *op->stub()->entry()); 2315 2316 __ bind(*op->stub()->continuation()); 2317 __ verify_oop(op->obj()->as_register()); 2318 } 2319 2320 2321 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) { 2322 LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); ) 2323 if (UseSlowPath || 2324 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) || 2325 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) { 2326 __ b(*op->stub()->entry()); 2327 } else { 2328 __ allocate_array(op->obj()->as_register(), 2329 op->len()->as_register(), 2330 op->tmp1()->as_register(), 2331 op->tmp2()->as_register(), 2332 op->tmp3()->as_register(), 2333 arrayOopDesc::header_size(op->type()), 2334 type2aelembytes(op->type()), 2335 op->klass()->as_register(), 2336 *op->stub()->entry()); 2337 } 2338 __ bind(*op->stub()->continuation()); 2339 } 2340 2341 2342 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias, 2343 ciMethodData *md, ciProfileData *data, 2344 Register recv, Register tmp1, Label* update_done) { 2345 uint i; 2346 
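  // Layout assumed here (ReceiverTypeData): row_limit() rows of (receiver
  // klass, count) pairs. The first loop below increments the count of a
  // matching row; the second claims the first empty row for a new receiver.
  // If every row is taken, control simply falls through to the caller.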
for (i = 0; i < VirtualCallData::row_limit(); i++) { 2347 Label next_test; 2348 // See if the receiver is receiver[n]. 2349 __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo); 2350 __ verify_klass_ptr(tmp1); 2351 __ cmpd(CCR0, recv, tmp1); 2352 __ bne(CCR0, next_test); 2353 2354 __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2355 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2356 __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2357 __ b(*update_done); 2358 2359 __ bind(next_test); 2360 } 2361 2362 // Didn't find receiver; find next empty slot and fill it in. 2363 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2364 Label next_test; 2365 __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo); 2366 __ cmpdi(CCR0, tmp1, 0); 2367 __ bne(CCR0, next_test); 2368 __ li(tmp1, DataLayout::counter_increment); 2369 __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo); 2370 __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2371 __ b(*update_done); 2372 2373 __ bind(next_test); 2374 } 2375 } 2376 2377 2378 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 2379 ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) { 2380 md = method->method_data_or_null(); 2381 assert(md != NULL, "Sanity"); 2382 data = md->bci_to_data(bci); 2383 assert(data != NULL, "need data for checkcast"); 2384 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 2385 if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) { 2386 // The offset is large so bias the mdo by the base of the slot so 2387 // that the ld can use simm16s to reference the slots of the data. 2388 mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset()); 2389 } 2390 } 2391 2392 2393 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) { 2394 Register obj = op->object()->as_register(); 2395 Register k_RInfo = op->tmp1()->as_register(); 2396 Register klass_RInfo = op->tmp2()->as_register(); 2397 Register Rtmp1 = op->tmp3()->as_register(); 2398 Register dst = op->result_opr()->as_register(); 2399 ciKlass* k = op->klass(); 2400 bool should_profile = op->should_profile(); 2401 bool move_obj_to_dst = (op->code() == lir_checkcast); 2402 // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be same as one of the temps. 
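  // Example of the conflict handled below: if the allocator assigns obj the
  // same register as tmp1 (k_RInfo), loading the expected klass would
  // clobber the object; obj is therefore moved to dst up front and, for
  // checkcast, restored from dst on the failure path.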
2403 bool reg_conflict = (obj == k_RInfo || obj == klass_RInfo || obj == Rtmp1); 2404 bool restore_obj = move_obj_to_dst && reg_conflict; 2405 2406 __ cmpdi(CCR0, obj, 0); 2407 if (move_obj_to_dst || reg_conflict) { 2408 __ mr_if_needed(dst, obj); 2409 if (reg_conflict) { obj = dst; } 2410 } 2411 2412 ciMethodData* md; 2413 ciProfileData* data; 2414 int mdo_offset_bias = 0; 2415 if (should_profile) { 2416 ciMethod* method = op->profiled_method(); 2417 assert(method != NULL, "Should have method"); 2418 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2419 2420 Register mdo = k_RInfo; 2421 Register data_val = Rtmp1; 2422 Label not_null; 2423 __ bne(CCR0, not_null); 2424 metadata2reg(md->constant_encoding(), mdo); 2425 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2426 __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo); 2427 __ ori(data_val, data_val, BitData::null_seen_byte_constant()); 2428 __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo); 2429 __ b(*obj_is_null); 2430 __ bind(not_null); 2431 } else { 2432 __ beq(CCR0, *obj_is_null); 2433 } 2434 2435 // get object class 2436 __ load_klass(klass_RInfo, obj); 2437 2438 if (k->is_loaded()) { 2439 metadata2reg(k->constant_encoding(), k_RInfo); 2440 } else { 2441 klass2reg_with_patching(k_RInfo, op->info_for_patch()); 2442 } 2443 2444 Label profile_cast_failure, failure_restore_obj, profile_cast_success; 2445 Label *failure_target = should_profile ? &profile_cast_failure : failure; 2446 Label *success_target = should_profile ? &profile_cast_success : success; 2447 2448 if (op->fast_check()) { 2449 assert_different_registers(klass_RInfo, k_RInfo); 2450 __ cmpd(CCR0, k_RInfo, klass_RInfo); 2451 if (should_profile) { 2452 __ bne(CCR0, *failure_target); 2453 // Fall through to success case. 2454 } else { 2455 __ beq(CCR0, *success); 2456 // Fall through to failure case. 2457 } 2458 } else { 2459 bool need_slow_path = true; 2460 if (k->is_loaded()) { 2461 if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) { 2462 need_slow_path = false; 2463 } 2464 // Perform the fast part of the checking logic. 2465 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL), 2466 failure_target, NULL, RegisterOrConstant(k->super_check_offset())); 2467 } else { 2468 // Perform the fast part of the checking logic. 2469 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target); 2470 } 2471 if (!need_slow_path) { 2472 if (!should_profile) { __ b(*success); } 2473 } else { 2474 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2475 address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id); 2476 //__ load_const_optimized(Rtmp1, entry, R0); 2477 __ calculate_address_from_global_toc(Rtmp1, entry, true, true, false); 2478 __ mtctr(Rtmp1); 2479 __ bctrl(); // sets CR0 2480 if (should_profile) { 2481 __ bne(CCR0, *failure_target); 2482 // Fall through to success case. 2483 } else { 2484 __ beq(CCR0, *success); 2485 // Fall through to failure case. 
2486 } 2487 } 2488 } 2489 2490 if (should_profile) { 2491 Register mdo = k_RInfo, recv = klass_RInfo; 2492 assert_different_registers(mdo, recv, Rtmp1); 2493 __ bind(profile_cast_success); 2494 metadata2reg(md->constant_encoding(), mdo); 2495 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2496 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success); 2497 __ b(*success); 2498 2499 // Cast failure case. 2500 __ bind(profile_cast_failure); 2501 metadata2reg(md->constant_encoding(), mdo); 2502 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2503 __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2504 __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment); 2505 __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2506 } 2507 2508 __ bind(*failure); 2509 2510 if (restore_obj) { 2511 __ mr(op->object()->as_register(), dst); 2512 // Fall through to failure case. 2513 } 2514 } 2515 2516 2517 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) { 2518 LIR_Code code = op->code(); 2519 if (code == lir_store_check) { 2520 Register value = op->object()->as_register(); 2521 Register array = op->array()->as_register(); 2522 Register k_RInfo = op->tmp1()->as_register(); 2523 Register klass_RInfo = op->tmp2()->as_register(); 2524 Register Rtmp1 = op->tmp3()->as_register(); 2525 bool should_profile = op->should_profile(); 2526 2527 __ verify_oop(value); 2528 CodeStub* stub = op->stub(); 2529 // Check if it needs to be profiled. 2530 ciMethodData* md; 2531 ciProfileData* data; 2532 int mdo_offset_bias = 0; 2533 if (should_profile) { 2534 ciMethod* method = op->profiled_method(); 2535 assert(method != NULL, "Should have method"); 2536 setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias); 2537 } 2538 Label profile_cast_success, failure, done; 2539 Label *success_target = should_profile ? &profile_cast_success : &done; 2540 2541 __ cmpdi(CCR0, value, 0); 2542 if (should_profile) { 2543 Label not_null; 2544 __ bne(CCR0, not_null); 2545 Register mdo = k_RInfo; 2546 Register data_val = Rtmp1; 2547 metadata2reg(md->constant_encoding(), mdo); 2548 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2549 __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo); 2550 __ ori(data_val, data_val, BitData::null_seen_byte_constant()); 2551 __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo); 2552 __ b(done); 2553 __ bind(not_null); 2554 } else { 2555 __ beq(CCR0, done); 2556 } 2557 if (!os::zero_page_read_protected() || !ImplicitNullChecks) { 2558 explicit_null_check(array, op->info_for_exception()); 2559 } else { 2560 add_debug_info_for_null_check_here(op->info_for_exception()); 2561 } 2562 __ load_klass(k_RInfo, array); 2563 __ load_klass(klass_RInfo, value); 2564 2565 // Get instance klass. 2566 __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo); 2567 // Perform the fast part of the checking logic. 
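    // Roughly: the fast path decides the common cases inline via the
    // super-type cache and the super_check_offset slot; only the remaining
    // cases reach the slow_subtype_check runtime stub below, which leaves
    // its answer in CR0 (eq = subtype).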
2568 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL); 2569 2570 // Call out-of-line instance of __ check_klass_subtype_slow_path(...): 2571 const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id); 2572 //__ load_const_optimized(R0, slow_path); 2573 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path)); 2574 __ mtctr(R0); 2575 __ bctrl(); // sets CR0 2576 if (!should_profile) { 2577 __ beq(CCR0, done); 2578 __ bind(failure); 2579 } else { 2580 __ bne(CCR0, failure); 2581 // Fall through to the success case. 2582 2583 Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1; 2584 assert_different_registers(value, mdo, recv, tmp1); 2585 __ bind(profile_cast_success); 2586 metadata2reg(md->constant_encoding(), mdo); 2587 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2588 __ load_klass(recv, value); 2589 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done); 2590 __ b(done); 2591 2592 // Cast failure case. 2593 __ bind(failure); 2594 metadata2reg(md->constant_encoding(), mdo); 2595 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2596 Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias); 2597 __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2598 __ addi(tmp1, tmp1, -DataLayout::counter_increment); 2599 __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2600 } 2601 __ b(*stub->entry()); 2602 __ bind(done); 2603 2604 } else if (code == lir_checkcast) { 2605 Label success, failure; 2606 emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success); // Moves obj to dst. 2607 __ b(*op->stub()->entry()); 2608 __ align(32, 12); 2609 __ bind(success); 2610 } else if (code == lir_instanceof) { 2611 Register dst = op->result_opr()->as_register(); 2612 Label success, failure, done; 2613 emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure); 2614 __ li(dst, 0); 2615 __ b(done); 2616 __ align(32, 12); 2617 __ bind(success); 2618 __ li(dst, 1); 2619 __ bind(done); 2620 } else { 2621 ShouldNotReachHere(); 2622 } 2623 } 2624 2625 2626 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) { 2627 Register addr = op->addr()->as_pointer_register(); 2628 Register cmp_value = noreg, new_value = noreg; 2629 bool is_64bit = false; 2630 2631 if (op->code() == lir_cas_long) { 2632 cmp_value = op->cmp_value()->as_register_lo(); 2633 new_value = op->new_value()->as_register_lo(); 2634 is_64bit = true; 2635 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) { 2636 cmp_value = op->cmp_value()->as_register(); 2637 new_value = op->new_value()->as_register(); 2638 if (op->code() == lir_cas_obj) { 2639 if (UseCompressedOops) { 2640 Register t1 = op->tmp1()->as_register(); 2641 Register t2 = op->tmp2()->as_register(); 2642 cmp_value = __ encode_heap_oop(t1, cmp_value); 2643 new_value = __ encode_heap_oop(t2, new_value); 2644 } else { 2645 is_64bit = true; 2646 } 2647 } 2648 } else { 2649 Unimplemented(); 2650 } 2651 2652 if (is_64bit) { 2653 __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr, 2654 MacroAssembler::MemBarNone, 2655 MacroAssembler::cmpxchgx_hint_atomic_update(), 2656 noreg, NULL, /*check without ldarx first*/true); 2657 } else { 2658 __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr, 2659 MacroAssembler::MemBarNone, 2660 
MacroAssembler::cmpxchgx_hint_atomic_update(), 2661 noreg, /*check without ldarx first*/true); 2662 } 2663 2664 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { 2665 __ isync(); 2666 } else { 2667 __ sync(); 2668 } 2669 } 2670 2671 2672 void LIR_Assembler::set_24bit_FPU() { 2673 Unimplemented(); 2674 } 2675 2676 void LIR_Assembler::reset_FPU() { 2677 Unimplemented(); 2678 } 2679 2680 2681 void LIR_Assembler::breakpoint() { 2682 __ illtrap(); 2683 } 2684 2685 2686 void LIR_Assembler::push(LIR_Opr opr) { 2687 Unimplemented(); 2688 } 2689 2690 void LIR_Assembler::pop(LIR_Opr opr) { 2691 Unimplemented(); 2692 } 2693 2694 2695 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) { 2696 Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no); 2697 Register dst = dst_opr->as_register(); 2698 Register reg = mon_addr.base(); 2699 int offset = mon_addr.disp(); 2700 // Compute pointer to BasicLock. 2701 __ add_const_optimized(dst, reg, offset); 2702 } 2703 2704 2705 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 2706 Register obj = op->obj_opr()->as_register(); 2707 Register hdr = op->hdr_opr()->as_register(); 2708 Register lock = op->lock_opr()->as_register(); 2709 2710 // Obj may not be an oop. 2711 if (op->code() == lir_lock) { 2712 MonitorEnterStub* stub = (MonitorEnterStub*)op->stub(); 2713 if (UseFastLocking) { 2714 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2715 // Add debug info for NullPointerException only if one is possible. 2716 if (op->info() != NULL) { 2717 if (!os::zero_page_read_protected() || !ImplicitNullChecks) { 2718 explicit_null_check(obj, op->info()); 2719 } else { 2720 add_debug_info_for_null_check_here(op->info()); 2721 } 2722 } 2723 __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry()); 2724 } else { 2725 // always do slow locking 2726 // note: The slow locking code could be inlined here, however if we use 2727 // slow locking, speed doesn't matter anyway and this solution is 2728 // simpler and requires less duplicated code - additionally, the 2729 // slow locking code is the same in either case which simplifies 2730 // debugging. 2731 __ b(*op->stub()->entry()); 2732 } 2733 } else { 2734 assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock"); 2735 if (UseFastLocking) { 2736 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header"); 2737 __ unlock_object(hdr, obj, lock, *op->stub()->entry()); 2738 } else { 2739 // always do slow unlocking 2740 // note: The slow unlocking code could be inlined here, however if we use 2741 // slow unlocking, speed doesn't matter anyway and this solution is 2742 // simpler and requires less duplicated code - additionally, the 2743 // slow unlocking code is the same in either case which simplifies 2744 // debugging. 2745 __ b(*op->stub()->entry()); 2746 } 2747 } 2748 __ bind(*op->stub()->continuation()); 2749 } 2750 2751 2752 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) { 2753 ciMethod* method = op->profiled_method(); 2754 int bci = op->profiled_bci(); 2755 ciMethod* callee = op->profiled_callee(); 2756 2757 // Update counter for all call types. 
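  // Note on mdo_offset_bias, used throughout the profiling code: PPC
  // load/store displacements are signed 16-bit, so MethodData slots more
  // than +/-32KB from the mdo base cannot be addressed directly. Biasing
  // the base register once keeps every slot reference within simm16 range.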
2758 ciMethodData* md = method->method_data_or_null(); 2759 assert(md != NULL, "Sanity"); 2760 ciProfileData* data = md->bci_to_data(bci); 2761 assert(data->is_CounterData(), "need CounterData for calls"); 2762 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 2763 Register mdo = op->mdo()->as_register(); 2764 #ifdef _LP64 2765 assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated"); 2766 Register tmp1 = op->tmp1()->as_register_lo(); 2767 #else 2768 assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated"); 2769 Register tmp1 = op->tmp1()->as_register(); 2770 #endif 2771 metadata2reg(md->constant_encoding(), mdo); 2772 int mdo_offset_bias = 0; 2773 if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) + 2774 data->size_in_bytes())) { 2775 // The offset is large so bias the mdo by the base of the slot so 2776 // that the ld can use simm16s to reference the slots of the data. 2777 mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset()); 2778 __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0); 2779 } 2780 2781 Bytecodes::Code bc = method->java_code_at_bci(bci); 2782 const bool callee_is_static = callee->is_loaded() && callee->is_static(); 2783 // Perform additional virtual call profiling for invokevirtual and 2784 // invokeinterface bytecodes. 2785 if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) && 2786 !callee_is_static && // Required for optimized MH invokes. 2787 C1ProfileVirtualCalls) { 2788 assert(op->recv()->is_single_cpu(), "recv must be allocated"); 2789 Register recv = op->recv()->as_register(); 2790 assert_different_registers(mdo, tmp1, recv); 2791 assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls"); 2792 ciKlass* known_klass = op->known_holder(); 2793 if (C1OptimizeVirtualCallProfiling && known_klass != NULL) { 2794 // We know the type that will be seen at this call site; we can 2795 // statically update the MethodData* rather than needing to do 2796 // dynamic tests on the receiver type. 2797 2798 // NOTE: we should probably put a lock around this search to 2799 // avoid collisions by concurrent compilations. 2800 ciVirtualCallData* vc_data = (ciVirtualCallData*) data; 2801 uint i; 2802 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2803 ciKlass* receiver = vc_data->receiver(i); 2804 if (known_klass->equals(receiver)) { 2805 __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2806 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2807 __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2808 return; 2809 } 2810 } 2811 2812 // Receiver type not found in profile data; select an empty slot. 2813 2814 // Note that this is less efficient than it should be because it 2815 // always does a write to the receiver part of the 2816 // VirtualCallData rather than just the first time. 
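      // (Concurrent compilations may also race to claim the same row; the
      // profile tolerates this since receiver rows and counts are heuristic,
      // so a lost update merely costs some precision.)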
2817 for (i = 0; i < VirtualCallData::row_limit(); i++) { 2818 ciKlass* receiver = vc_data->receiver(i); 2819 if (receiver == NULL) { 2820 metadata2reg(known_klass->constant_encoding(), tmp1); 2821 __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo); 2822 2823 __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2824 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2825 __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo); 2826 return; 2827 } 2828 } 2829 } else { 2830 __ load_klass(recv, recv); 2831 Label update_done; 2832 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 2833 // Receiver did not match any saved receiver and there is no empty row for it. 2834 // Increment total counter to indicate polymorphic case. 2835 __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2836 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2837 __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2838 2839 __ bind(update_done); 2840 } 2841 } else { 2842 // Static call 2843 __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2844 __ addi(tmp1, tmp1, DataLayout::counter_increment); 2845 __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo); 2846 } 2847 } 2848 2849 2850 void LIR_Assembler::align_backward_branch_target() { 2851 __ align(32, 12); // Insert up to 3 nops to align with 32 byte boundary. 2852 } 2853 2854 2855 void LIR_Assembler::emit_delay(LIR_OpDelay* op) { 2856 Unimplemented(); 2857 } 2858 2859 2860 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) { 2861 assert(left->is_register(), "can only handle registers"); 2862 2863 if (left->is_single_cpu()) { 2864 __ neg(dest->as_register(), left->as_register()); 2865 } else if (left->is_single_fpu()) { 2866 __ fneg(dest->as_float_reg(), left->as_float_reg()); 2867 } else if (left->is_double_fpu()) { 2868 __ fneg(dest->as_double_reg(), left->as_double_reg()); 2869 } else { 2870 assert (left->is_double_cpu(), "Must be a long"); 2871 __ neg(dest->as_register_lo(), left->as_register_lo()); 2872 } 2873 } 2874 2875 2876 void LIR_Assembler::fxch(int i) { 2877 Unimplemented(); 2878 } 2879 2880 void LIR_Assembler::fld(int i) { 2881 Unimplemented(); 2882 } 2883 2884 void LIR_Assembler::ffree(int i) { 2885 Unimplemented(); 2886 } 2887 2888 2889 void LIR_Assembler::rt_call(LIR_Opr result, address dest, 2890 const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) { 2891 // Stubs: Called via rt_call, but dest is a stub address (no function descriptor). 2892 if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) || 2893 dest == Runtime1::entry_for(Runtime1::new_multi_array_id )) { 2894 //__ load_const_optimized(R0, dest); 2895 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest)); 2896 __ mtctr(R0); 2897 __ bctrl(); 2898 assert(info != NULL, "sanity"); 2899 add_call_info_here(info); 2900 return; 2901 } 2902 2903 __ call_c_with_frame_resize(dest, /*no resizing*/ 0); 2904 if (info != NULL) { 2905 add_call_info_here(info); 2906 } 2907 } 2908 2909 2910 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) { 2911 ShouldNotReachHere(); // Not needed on _LP64. 
2912 } 2913 2914 void LIR_Assembler::membar() { 2915 __ fence(); 2916 } 2917 2918 void LIR_Assembler::membar_acquire() { 2919 __ acquire(); 2920 } 2921 2922 void LIR_Assembler::membar_release() { 2923 __ release(); 2924 } 2925 2926 void LIR_Assembler::membar_loadload() { 2927 __ membar(Assembler::LoadLoad); 2928 } 2929 2930 void LIR_Assembler::membar_storestore() { 2931 __ membar(Assembler::StoreStore); 2932 } 2933 2934 void LIR_Assembler::membar_loadstore() { 2935 __ membar(Assembler::LoadStore); 2936 } 2937 2938 void LIR_Assembler::membar_storeload() { 2939 __ membar(Assembler::StoreLoad); 2940 } 2941 2942 void LIR_Assembler::on_spin_wait() { 2943 Unimplemented(); 2944 } 2945 2946 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) { 2947 LIR_Address* addr = addr_opr->as_address_ptr(); 2948 assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform"); 2949 if (addr->index()->is_illegal()) { 2950 __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp()); 2951 } else { 2952 assert(addr->disp() == 0, "can't have both: index and disp"); 2953 __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register()); 2954 } 2955 } 2956 2957 2958 void LIR_Assembler::get_thread(LIR_Opr result_reg) { 2959 ShouldNotReachHere(); 2960 } 2961 2962 2963 #ifdef ASSERT 2964 // Emit run-time assertion. 2965 void LIR_Assembler::emit_assert(LIR_OpAssert* op) { 2966 Unimplemented(); 2967 } 2968 #endif 2969 2970 2971 void LIR_Assembler::peephole(LIR_List* lir) { 2972 // Optimize instruction pairs before emitting. 2973 LIR_OpList* inst = lir->instructions_list(); 2974 for (int i = 1; i < inst->length(); i++) { 2975 LIR_Op* op = inst->at(i); 2976 2977 // 2 register-register-moves 2978 if (op->code() == lir_move) { 2979 LIR_Opr in2 = ((LIR_Op1*)op)->in_opr(), 2980 res2 = ((LIR_Op1*)op)->result_opr(); 2981 if (in2->is_register() && res2->is_register()) { 2982 LIR_Op* prev = inst->at(i - 1); 2983 if (prev && prev->code() == lir_move) { 2984 LIR_Opr in1 = ((LIR_Op1*)prev)->in_opr(), 2985 res1 = ((LIR_Op1*)prev)->result_opr(); 2986 if (in1->is_same_register(res2) && in2->is_same_register(res1)) { 2987 inst->remove_at(i); 2988 } 2989 } 2990 } 2991 } 2992 2993 } 2994 return; 2995 } 2996 2997 2998 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) { 2999 const Register Rptr = src->as_pointer_register(), 3000 Rtmp = tmp->as_register(); 3001 Register Rco = noreg; 3002 if (UseCompressedOops && data->is_oop()) { 3003 Rco = __ encode_heap_oop(Rtmp, data->as_register()); 3004 } 3005 3006 Label Lretry; 3007 __ bind(Lretry); 3008 3009 if (data->type() == T_INT) { 3010 const Register Rold = dest->as_register(), 3011 Rsrc = data->as_register(); 3012 assert_different_registers(Rptr, Rtmp, Rold, Rsrc); 3013 __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update()); 3014 if (code == lir_xadd) { 3015 __ add(Rtmp, Rsrc, Rold); 3016 __ stwcx_(Rtmp, Rptr); 3017 } else { 3018 __ stwcx_(Rsrc, Rptr); 3019 } 3020 } else if (data->is_oop()) { 3021 assert(code == lir_xchg, "xadd for oops"); 3022 const Register Rold = dest->as_register(); 3023 if (UseCompressedOops) { 3024 assert_different_registers(Rptr, Rold, Rco); 3025 __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update()); 3026 __ stwcx_(Rco, Rptr); 3027 } else { 3028 const Register Robj = data->as_register(); 3029 assert_different_registers(Rptr, Rold, Robj); 3030 __ ldarx(Rold, Rptr, 
MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stdcx_(Robj, Rptr);
    }
  } else if (data->type() == T_LONG) {
    const Register Rold = dest->as_register_lo(),
                   Rsrc = data->as_register_lo();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stdcx_(Rtmp, Rptr);
    } else {
      __ stdcx_(Rsrc, Rptr);
    }
  } else {
    ShouldNotReachHere();
  }

  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    __ bne_predict_not_taken(CCR0, Lretry);
  } else {
    __ bne( CCR0, Lretry);
  }

  if (UseCompressedOops && data->is_oop()) {
    __ decode_heap_oop(dest->as_register());
  }
}


void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label Lupdate, Ldo_update, Ldone;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (do_null) {
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ cmpdi(CCR0, obj, 0);
      __ bne(CCR0, Lupdate);
      __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      __ ori(R0, R0, TypeEntries::null_seen);
      if (do_update) {
        __ b(Ldo_update);
      } else {
        __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      }
    } else {
      if (do_update) {
        __ cmpdi(CCR0, obj, 0);
        __ beq(CCR0, Ldone);
      }
    }
#ifdef ASSERT
  } else {
    __ cmpdi(CCR0, obj, 0);
    __ bne(CCR0, Lupdate);
    __ stop("unexpected null obj", 0x9652);
#endif
  }

  __ bind(Lupdate);
  if (do_update) {
    Label Lnext;
    const Register klass = R29_TOC; // kill and reload
    bool klass_reg_used = false;
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      klass_reg_used = true;
      __ load_klass(klass, obj);
      metadata2reg(exact_klass->constant_encoding(), R0);
      __ cmpd(CCR0, klass, R0);
      __ beq(CCR0, ok);
      __ stop("exact klass and actual klass differ", 0x8564);
      __ bind(ok);
    }
#endif

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        if (exact_klass != NULL) {
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
          metadata2reg(exact_klass->constant_encoding(), klass);
        } else {
          __ load_klass(klass, obj);
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // may kill obj
        }

        // Like InterpreterMacroAssembler::profile_obj_type
        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before,
nothing to do (regardless of unknown bit). 3139 //beq(CCR1, do_nothing); 3140 3141 __ andi_(R0, klass, TypeEntries::type_unknown); 3142 // Already unknown. Nothing to do anymore. 3143 //bne(CCR0, do_nothing); 3144 __ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne 3145 __ beq(CCR0, Lnext); 3146 3147 if (TypeEntries::is_type_none(current_klass)) { 3148 __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask)); 3149 __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0). 3150 __ beq(CCR0, Ldo_update); // First time here. Set profile type. 3151 } 3152 3153 } else { 3154 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3155 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only"); 3156 3157 __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); 3158 __ andi_(R0, tmp, TypeEntries::type_unknown); 3159 // Already unknown. Nothing to do anymore. 3160 __ bne(CCR0, Lnext); 3161 } 3162 3163 // Different than before. Cannot keep accurate profile. 3164 __ ori(R0, tmp, TypeEntries::type_unknown); 3165 } else { 3166 // There's a single possible klass at this profile point 3167 assert(exact_klass != NULL, "should be"); 3168 __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); 3169 3170 if (TypeEntries::is_type_none(current_klass)) { 3171 klass_reg_used = true; 3172 metadata2reg(exact_klass->constant_encoding(), klass); 3173 3174 __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask)); 3175 // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask); 3176 __ cmpd(CCR1, R0, klass); 3177 // Klass seen before, nothing to do (regardless of unknown bit). 3178 __ beq(CCR1, Lnext); 3179 #ifdef ASSERT 3180 { 3181 Label ok; 3182 __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask)); 3183 __ beq(CCR0, ok); // First time here. 3184 3185 __ stop("unexpected profiling mismatch", 0x7865); 3186 __ bind(ok); 3187 } 3188 #endif 3189 // First time here. Set profile type. 3190 __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0). 3191 } else { 3192 assert(ciTypeEntries::valid_ciklass(current_klass) != NULL && 3193 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent"); 3194 3195 // Already unknown. Nothing to do anymore. 3196 __ andi_(R0, tmp, TypeEntries::type_unknown); 3197 __ bne(CCR0, Lnext); 3198 3199 // Different than before. Cannot keep accurate profile. 3200 __ ori(R0, tmp, TypeEntries::type_unknown); 3201 } 3202 } 3203 3204 __ bind(Ldo_update); 3205 __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); 3206 3207 __ bind(Lnext); 3208 if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit 3209 } 3210 __ bind(Ldone); 3211 } 3212 3213 3214 void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 3215 assert(op->crc()->is_single_cpu(), "crc must be register"); 3216 assert(op->val()->is_single_cpu(), "byte value must be register"); 3217 assert(op->result_opr()->is_single_cpu(), "result must be register"); 3218 Register crc = op->crc()->as_register(); 3219 Register val = op->val()->as_register(); 3220 Register res = op->result_opr()->as_register(); 3221 3222 assert_different_registers(val, crc, res); 3223 3224 __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0); 3225 __ nand(crc, crc, crc); // ~crc 3226 __ update_byte_crc32(crc, val, res); 3227 __ nand(res, crc, crc); // ~crc 3228 } 3229 3230 #undef __