/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  Unimplemented(); return false; // Currently not used on this platform.
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::R3_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::R3_opr;
}


// This specifies the stack pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //   1. Create a new compiled activation.
  //   2. Initialize local variables in the compiled activation. The expression
  //      stack must be empty at the osr_bci; it is not initialized.
  //   3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame, so in the OSR buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the OSR buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the OSR buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
      (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size);

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();
  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  //__ load_const_optimized(R0, entry_point);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
  __ mtctr(R0);
  __ bctr();

  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");

  int offset = code_offset();
  bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
  const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;

  // Fetch the exception from TLS and clear out exception-related thread state.
  __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ li(R0, 0);
  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception);
  if (preserve_exception) { __ mr(Rexception_save, Rexception); }

  // Perform needed unlocking
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R4_opr);
    stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
    __ unlock_object(R5, R6, R4, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Unimplemented();
  }

  // Dispatch to the unwind logic.
  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
  //__ load_const_optimized(R0, unwind_stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
  if (preserve_exception) { __ mr(Rexception, Rexception_save); }
  __ mtctr(R0);
  __ bctr();

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size);

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);

  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
  __ end_a_stub();

  return offset;
}

void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ li(reg, 0);
  } else {
    AddressLiteral addrlit = __ constant_oop_address(o);
    __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the oop recorder's table to hold the object once it's been patched.
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation)
  __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in the oop recorder's table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  const bool is_int = op->result_opr()->is_single_cpu();
  Register Rdividend = is_int ? op->in_opr1()->as_register() : op->in_opr1()->as_register_lo();
  Register Rdivisor  = noreg;
  Register Rscratch  = op->in_opr3()->as_register();
  Register Rresult   = is_int ? op->result_opr()->as_register() : op->result_opr()->as_register_lo();
  long divisor = -1;

  if (op->in_opr2()->is_register()) {
    Rdivisor = is_int ? op->in_opr2()->as_register() : op->in_opr2()->as_register_lo();
  } else {
    divisor = is_int ? op->in_opr2()->as_constant_ptr()->as_jint()
                     : op->in_opr2()->as_constant_ptr()->as_jlong();
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor  != Rscratch, "");
  assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg) {
    if (divisor == 1) { // stupid, but can happen
      if (op->code() == lir_idiv) {
        __ mr_if_needed(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else if (is_power_of_2(divisor)) {
      // Convert division by a power of two into some shifts and logical operations.
      int log2 = log2_intptr(divisor);

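      // Signed division by 2^log2 rounds toward zero only for non-negative
      // dividends; a negative dividend needs 2^log2 - 1 added before the
      // arithmetic shift. The correction computed below is the sign bit
      // (divisor == 2) or the sign mask clipped to the low log2 bits
      // (divisor > 2), i.e. 2^log2 - 1 for negative dividends and 0 otherwise.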
      // Round towards 0.
      if (divisor == 2) {
        if (is_int) {
          __ srwi(Rscratch, Rdividend, 31);
        } else {
          __ srdi(Rscratch, Rdividend, 63);
        }
      } else {
        if (is_int) {
          __ srawi(Rscratch, Rdividend, 31);
        } else {
          __ sradi(Rscratch, Rdividend, 63);
        }
        __ clrldi(Rscratch, Rscratch, 64-log2);
      }
      __ add(Rscratch, Rdividend, Rscratch);

      if (op->code() == lir_idiv) {
        if (is_int) {
          __ srawi(Rresult, Rscratch, log2);
        } else {
          __ sradi(Rresult, Rscratch, log2);
        }
      } else { // lir_irem
        __ clrrdi(Rscratch, Rscratch, log2);
        __ sub(Rresult, Rdividend, Rscratch);
      }

    } else if (divisor == -1) {
      if (op->code() == lir_idiv) {
        __ neg(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else {
      __ load_const_optimized(Rscratch, divisor);
      if (op->code() == lir_idiv) {
        if (is_int) {
          __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        } else {
          __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        }
      } else {
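        // lir_irem: compute the remainder as dividend - (dividend / divisor) * divisor.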
        assert(Rscratch != R0, "need both");
        if (is_int) {
          __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mullw(Rscratch, R0, Rscratch);
        } else {
          __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mulld(Rscratch, R0, Rscratch);
        }
        __ sub(Rresult, Rdividend, Rscratch);
      }

    }
    return;
  }

  Label regular, done;
  if (is_int) {
    __ cmpwi(CCR0, Rdivisor, -1);
  } else {
    __ cmpdi(CCR0, Rdivisor, -1);
  }
  __ bne(CCR0, regular);
  if (op->code() == lir_idiv) {
    __ neg(Rresult, Rdividend);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    } else {
      __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    }
  } else { // lir_irem
    __ li(Rresult, 0);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mullw(Rscratch, Rscratch, Rdivisor);
    } else {
      __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mulld(Rscratch, Rscratch, Rdivisor);
    }
    __ sub(Rresult, Rdividend, Rscratch);
  }
  __ bind(done);
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
#endif

  Label *L = op->label();
  if (op->cond() == lir_cond_always) {
    __ b(*L);
  } else {
    Label done;
    bool is_unordered = false;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      is_unordered = true;
    } else {
      assert(op->code() == lir_branch, "just checking");
    }

    bool positive = false;
    Assembler::Condition cond = Assembler::equal;
    switch (op->cond()) {
      case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
      case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
      case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
      case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
      default:                    ShouldNotReachHere();
    }
    int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
    int bi = Assembler::bi0(BOOL_RESULT, cond);
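    // bo selects "branch if CR bit set" vs. "branch if CR bit clear"; bi picks
    // the tested bit within BOOL_RESULT. For float branches, the unordered case
    // sets the summary-overflow bit: if the unordered successor equals the
    // branch target, branch to it explicitly on SO; otherwise make sure the
    // unordered case skips a branch-on-bit-clear so that it falls through to
    // the unordered successor.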
    if (is_unordered) {
      if (positive) {
        if (op->ublock() == op->block()) {
          __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
        }
      } else {
        if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
      }
    }
    __ bc_far_optimized(bo, bi, *L);
    __ bind(done);
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr src = op->in_opr(),
          dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      __ fcfid(dst->as_double_reg(), src->as_double_reg()); // via mem
      break;
    }
    case Bytecodes::_i2f: {
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc = src->as_double_reg(); // via mem
      if (VM_Version::has_fcfids()) {
        __ fcfids(rdst, rsrc);
      } else {
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_l2f: { // >= Power7
      assert(VM_Version::has_fcfids(), "fcfid+frsp needs fixup code to avoid rounding incompatibility");
      __ fcfids(dst->as_float_reg(), src->as_double_reg()); // via mem
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->double_stack_ix());
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      __ li(R0, 0); // 0 in case of NAN
      __ std(R0, addr.disp(), addr.base());
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      __ stfd(rsrc, addr.disp(), addr.base());
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address       addr = frame_map()->address_for_slot(dst->double_stack_ix());
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      __ li(R0, 0); // 0 in case of NAN
      __ std(R0, addr.disp(), addr.base());
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      __ stfd(rsrc, addr.disp(), addr.base());
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // Do nothing, since all instructions are word-aligned on PPC.
}


bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr   = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub that will be used by the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(rtype==relocInfo::opt_virtual_call_type || rtype==relocInfo::static_call_type, "unexpected rtype");

  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as a dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ calculate_address_from_global_toc(R2_TOC, __ method_toc());

  // Virtual call relocation will point to ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
  // Call to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as a dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere(); // ic_call is used instead.
}


void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
  __ null_check(addr, stub->entry());
  append_code_stub(stub);
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset in R0.
    assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
    __ load_const_optimized(R0, offset);
    store_offset = store(from_reg, base, R0, type, wide);
  } else {
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
      case T_CHAR  :
      case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
      case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
      case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
      case T_ADDRESS:
      case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            // Encoding done in caller
            __ stw(from_reg->as_register(), offset, base);
          } else {
            __ std(from_reg->as_register(), offset, base);
          }
          __ verify_oop(from_reg->as_register());
          break;
        }
      case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
    case T_CHAR  :
    case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stdx(from_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    case T_ADDRESS:
      __ stdx(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          // Encoding done in caller.
          __ stwx(from_reg->as_register(), base, disp);
        } else {
          __ stdx(from_reg->as_register(), base, disp);
        }
        __ verify_oop(from_reg->as_register()); // kills R0
        break;
      }
    case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset in R0.
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  :   __ lbz(to_reg->as_register(), offset, base);
                       __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  :   __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT :   __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   :   __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  :   __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
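        // A klass field load arrives here as T_ADDRESS; with compressed class
        // pointers the field holds a narrow klass that must be decoded after
        // the 32-bit load.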
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lwz(to_reg->as_register(), offset, base);
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld(to_reg->as_register(), offset, base);
          }
          __ verify_oop(to_reg->as_register());
          break;
        }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  :  __ lbzx(to_reg->as_register(), base, disp);
                    __ extsb(to_reg->as_register(), to_reg->as_register()); break;
    case T_CHAR  :  __ lhzx(to_reg->as_register(), base, disp); break;
    case T_SHORT :  __ lhax(to_reg->as_register(), base, disp); break;
    case T_INT   :  __ lwax(to_reg->as_register(), base, disp); break;
    case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lwzx(to_reg->as_register(), base, disp);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ldx(to_reg->as_register(), base, disp);
        }
        __ verify_oop(to_reg->as_register());
        break;
      }
    case T_FLOAT:  __ lfsx(to_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(to_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    default      : ShouldNotReachHere();
  }
  return load_offset;
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  Register src_reg = R0;
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_ADDRESS: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_OBJECT: {
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      jlong value = c->as_jlong_bits(); // Keep all 64 bits; the slot is stored with std.
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  int offset = -1;
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(base, info);
  }

  switch (c->type()) {
    case T_FLOAT: type = T_INT; // fall through
    case T_INT:
    case T_ADDRESS: {
      tmp = FrameMap::R0_opr;
      __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
      break;
    }
    case T_DOUBLE: type = T_LONG; // fall through
    case T_LONG: {
      tmp = FrameMap::R0_long_opr;
      __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
      break;
    }
    case T_OBJECT: {
      tmp = FrameMap::R0_opr;
      if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
        AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
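        // Materialize the narrow oop as a fixed lis/ori pair; the oop
        // relocation (compressed format) lets the embedded value be found
        // and updated later.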
        __ lis(R0, oop_addr.value() >> 16); // Don't care about sign extend (will use stw).
        __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
        __ ori(R0, R0, oop_addr.value() & 0xffff);
      } else {
        jobject2reg(c->as_jobject(), R0);
      }
      break;
    }
    default:
      Unimplemented();
  }

  // Handle either reg+reg or reg+disp address.
  if (addr->index()->is_valid()) {
    assert(addr->disp() == 0, "must be zero");
    offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
  } else {
    assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
    offset = store(tmp, base, addr->disp(), type, wide, false);
  }

  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    if (!needs_explicit_null_check) {
      add_debug_info_for_null_check(offset, info);
    }
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
      break;
    }
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);  // Yes, as_jint ...
      break;
    }
    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), to_reg->as_register());
      } else {
        jobject2reg_with_patching(to_reg->as_register(), info);
      }
      break;
    }

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        if (to_reg->is_single_fpu()) {
          address const_addr = __ float_constant(c->as_jfloat());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
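          // The constant lives in the code's constant area; the internal_word
          // relocation keeps the embedded address valid if the code moves.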
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfsx(to_reg->as_float_reg(), R0);
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
          __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
        }
      }
      break;

    case T_DOUBLE:
      {
        if (to_reg->is_double_fpu()) {
          address const_addr = __ double_constant(c->as_jdouble());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfdx(to_reg->as_double_reg(), R0);
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Unimplemented(); return Address();
}


inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
  if (addr->index()->is_illegal()) {
    return (RegisterOrConstant)(addr->disp());
  } else {
    return (RegisterOrConstant)(addr->index()->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const Register tmp = R0;
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lwz(tmp, from.disp(), from.base());
      __ stw(tmp, to.disp(), to.base());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Unimplemented(); return Address();
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Unimplemented(); return Address();
}


void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  // Null check for large offsets in LIRGenerator::do_LoadField.
  bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm16(disp_value)) {
      if (needs_patching) {
        __ load_const32(R0, 0); // patchable int
      } else {
        __ load_const_optimized(R0, disp_value);
      }
      disp_reg = R0;
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word())  {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word())  {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (type == T_ARRAY || type == T_OBJECT) && UseCompressedOops && !wide &&
                      Universe::narrow_oop_mode() != Universe::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (load_disp) {
      disp_reg = use_R29 ? R29_TOC : R0;
      if (needs_patching) {
        __ load_const32(disp_reg, 0); // patchable int
      } else {
        __ load_const_optimized(disp_reg, disp_value);
      }
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (compress_oop) {
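    // encode_heap_oop compresses the oop (into R0 if needed) and returns the
    // register that now holds the narrow oop; store that register instead.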
    Register co = __ encode_heap_oop(R0, from_reg->as_register());
    from_reg = FrameMap::as_opr(co);
  }

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (use_R29) {
    __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::return_op(LIR_Opr result) {
  const Register return_pc        = R11;
  const Register polling_page     = R12;

  // Pop the stack before the safepoint code.
  int frame_size = initial_frame_size_in_bytes();
  if (Assembler::is_simm(frame_size, 16)) {
    __ addi(R1_SP, R1_SP, frame_size);
  } else {
    __ pop_frame();
  }

  if (LoadPollAddressFromThread) {
    // TODO: PPC port __ ld(polling_page, in_bytes(JavaThread::poll_address_offset()), R16_thread);
    Unimplemented();
  } else {
    __ load_const_optimized(polling_page, (long)(address) os::get_polling_page(), R0); // TODO: PPC port: get_standard_polling_page()
  }

  // Restore return pc relative to caller's sp.
  __ ld(return_pc, _abi(lr), R1_SP);
  // Move return pc to LR.
  __ mtlr(return_pc);

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(polling_page);

  // Return.
  __ blr();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {

  if (LoadPollAddressFromThread) {
    const Register poll_addr = tmp->as_register();
    // TODO: PPC port __ ld(poll_addr, in_bytes(JavaThread::poll_address_offset()), R16_thread);
    Unimplemented();
    __ relocate(relocInfo::poll_type); // XXX
    guarantee(info != NULL, "Shouldn't be NULL");
    int offset = __ offset();
    add_debug_info_for_branch(info);
    __ load_from_polling_page(poll_addr);
    return offset;
  }

  __ load_const_optimized(tmp->as_register(), (intptr_t)os::get_polling_page(), R0); // TODO: PPC port: get_standard_polling_page()
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(tmp->as_register());

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(max_static_call_stub_size);
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
  const Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    __ mtctr(reg_scratch);
    __ bctr();
  }
  if (!success) {
    bailout("const section overflow");
    return;
  }

  assert(__ offset() - start <= max_static_call_stub_size, "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
  if (opr1->is_single_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (unsigned_comp) {
              if (Assembler::is_uimm(con, 16)) {
                __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
              }
            } else {
              if (Assembler::is_simm(con, 16)) {
                __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
              }
            }
          }
          break;

        case T_OBJECT:
          // There are only equal/notequal comparisons on objects.
          {
            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
            } else {
              jobject2reg(con, R0);
              __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
        LIR_Address *addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
        else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
        __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
      } else {
        if (unsigned_comp) {
          __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        } else {
          __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        }
      }
    }
  } else if (opr1->is_double_cpu()) {
    if (opr2->is_constant()) {
      jlong con = opr2->as_constant_ptr()->as_jlong();
      if (unsigned_comp) {
        if (Assembler::is_uimm(con, 16)) {
          __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      } else {
        if (Assembler::is_simm(con, 16)) {
          __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      }
    } else if (opr2->is_register()) {
      if (unsigned_comp) {
        __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      } else {
        __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
    LIR_Address * addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
    else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
    __ cmpdi(BOOL_RESULT, R0, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register Rdst = dst->as_register();
  Label done;
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
    } else if (left->is_double_fpu()) {
      __ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
    __ li(Rdst, is_unordered_less ? -1 : 1);
    __ bso(CCR0, done);
  } else if (code == lir_cmp_l2i) {
    __ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
  } else {
    ShouldNotReachHere();
  }
  __ mfcr(R0); // Bits 32..33 (CR0.lt and CR0.gt) are set as follows: <: 0b10, =: 0b00, >: 0b01.
1475   __ srwi(Rdst, R0, 30);
1476   __ srawi(R0, R0, 31);
1477   __ orr(Rdst, R0, Rdst); // set result as follows: <: -1, =: 0, >: 1
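  // Worked example of the sequence above (32-bit CR image in R0, with
  // CR0.lt as its most significant bit and CR0.gt next):
  //   less:    R0 = 0b10... -> srwi: Rdst = 2, srawi: R0 = -1, orr: -1
  //   equal:   R0 = 0b00... -> srwi: Rdst = 0, srawi: R0 =  0, orr:  0
  //   greater: R0 = 0b01... -> srwi: Rdst = 1, srawi: R0 =  0, orr:  1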
1478   __ bind(done);
1479 }
1480 
1481 
1482 inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
1483   if (src->is_constant()) {
1484     lasm->const2reg(src, dst, lir_patch_none, NULL);
1485   } else if (src->is_register()) {
1486     lasm->reg2reg(src, dst);
1487   } else if (src->is_stack()) {
1488     lasm->stack2reg(src, dst, dst->type());
1489   } else {
1490     ShouldNotReachHere();
1491   }
1492 }
1493 
1494 
1495 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
1496   if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
1497     load_to_reg(this, opr1, result); // Condition doesn't matter.
1498     return;
1499   }
1500 
1501   bool positive = false;
1502   Assembler::Condition cond = Assembler::equal;
1503   switch (condition) {
1504     case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
1505     case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
1506     case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
1507     case lir_cond_belowEqual:
1508     case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
1509     case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
1510     case lir_cond_aboveEqual:
1511     case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
1512     default:                    ShouldNotReachHere();
1513   }
1514 
1515   // Try to use isel on >=Power7.
1516   if (VM_Version::has_isel() && result->is_cpu_register()) {
1517     bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
1518     const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();
1519 
    // We can use result_reg to load one operand if it is not already in a register.
1521     Register first  = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
1522              second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;
1523 
1524     if (first != second) {
1525       if (!o1_is_reg) {
1526         load_to_reg(this, opr1, result);
1527       }
1528 
1529       if (!o2_is_reg) {
1530         load_to_reg(this, opr2, result);
1531       }
1532 
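      // Branch-free select: depending on the condition bit in BOOL_RESULT,
      // isel copies either 'first' or 'second' into result_reg.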
1533       __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
1534       return;
1535     }
1536   } // isel
1537 
1538   load_to_reg(this, opr1, result);
1539 
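  // No isel available (or both operands would end up in result_reg):
  // load opr1 unconditionally, then branch over the load of opr2 when
  // the condition selects opr1.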
1540   Label skip;
1541   int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
1542   int bi = Assembler::bi0(BOOL_RESULT, cond);
1543   __ bc(bo, bi, skip);
1544 
1545   load_to_reg(this, opr2, result);
1546   __ bind(skip);
1547 }
1548 
1549 
1550 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
1551                              CodeEmitInfo* info, bool pop_fpu_stack) {
1552   assert(info == NULL, "unused on this code path");
1553   assert(left->is_register(), "wrong items state");
1554   assert(dest->is_register(), "wrong items state");
1555 
1556   if (right->is_register()) {
1557     if (dest->is_float_kind()) {
1558 
1559       FloatRegister lreg, rreg, res;
1560       if (right->is_single_fpu()) {
1561         lreg = left->as_float_reg();
1562         rreg = right->as_float_reg();
1563         res  = dest->as_float_reg();
1564         switch (code) {
1565           case lir_add: __ fadds(res, lreg, rreg); break;
1566           case lir_sub: __ fsubs(res, lreg, rreg); break;
1567           case lir_mul: // fall through
1568           case lir_mul_strictfp: __ fmuls(res, lreg, rreg); break;
1569           case lir_div: // fall through
1570           case lir_div_strictfp: __ fdivs(res, lreg, rreg); break;
1571           default: ShouldNotReachHere();
1572         }
1573       } else {
1574         lreg = left->as_double_reg();
1575         rreg = right->as_double_reg();
1576         res  = dest->as_double_reg();
1577         switch (code) {
1578           case lir_add: __ fadd(res, lreg, rreg); break;
1579           case lir_sub: __ fsub(res, lreg, rreg); break;
1580           case lir_mul: // fall through
1581           case lir_mul_strictfp: __ fmul(res, lreg, rreg); break;
1582           case lir_div: // fall through
1583           case lir_div_strictfp: __ fdiv(res, lreg, rreg); break;
1584           default: ShouldNotReachHere();
1585         }
1586       }
1587 
1588     } else if (dest->is_double_cpu()) {
1589 
1590       Register dst_lo = dest->as_register_lo();
1591       Register op1_lo = left->as_pointer_register();
1592       Register op2_lo = right->as_pointer_register();
1593 
1594       switch (code) {
1595         case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
1596         case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
1597         case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
1598         default: ShouldNotReachHere();
1599       }
1600     } else {
1601       assert (right->is_single_cpu(), "Just Checking");
1602 
1603       Register lreg = left->as_register();
1604       Register res  = dest->as_register();
1605       Register rreg = right->as_register();
1606       switch (code) {
1607         case lir_add:  __ add  (res, lreg, rreg); break;
1608         case lir_sub:  __ sub  (res, lreg, rreg); break;
1609         case lir_mul:  __ mullw(res, lreg, rreg); break;
1610         default: ShouldNotReachHere();
1611       }
1612     }
1613   } else {
1614     assert (right->is_constant(), "must be constant");
1615 
1616     if (dest->is_single_cpu()) {
1617       Register lreg = left->as_register();
1618       Register res  = dest->as_register();
1619       int    simm16 = right->as_constant_ptr()->as_jint();
1620 
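      // Note: lir_sub falls through to lir_add with a negated immediate,
      // e.g. "x - 5" is emitted as addi(res, x, -5). The long case below
      // uses the same trick.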
1621       switch (code) {
1622         case lir_sub:  assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
1623                        simm16 = -simm16;
1624         case lir_add:  if (res == lreg && simm16 == 0) break;
1625                        __ addi(res, lreg, simm16); break;
1626         case lir_mul:  if (res == lreg && simm16 == 1) break;
1627                        __ mulli(res, lreg, simm16); break;
1628         default: ShouldNotReachHere();
1629       }
1630     } else {
1631       Register lreg = left->as_pointer_register();
1632       Register res  = dest->as_register_lo();
1633       long con = right->as_constant_ptr()->as_jlong();
1634       assert(Assembler::is_simm16(con), "must be simm16");
1635 
1636       switch (code) {
1637         case lir_sub:  assert(Assembler::is_simm16(-con), "cannot encode");  // see do_ArithmeticOp_Long
1638                        con = -con;
1639         case lir_add:  if (res == lreg && con == 0) break;
1640                        __ addi(res, lreg, (int)con); break;
1641         case lir_mul:  if (res == lreg && con == 1) break;
1642                        __ mulli(res, lreg, (int)con); break;
1643         default: ShouldNotReachHere();
1644       }
1645     }
1646   }
1647 }
1648 
1649 
1650 void LIR_Assembler::fpop() {
  Unimplemented(); // Not used on this platform; PPC has no FPU register stack.
1653 }
1654 
1655 
1656 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
1657   switch (code) {
1658     case lir_sqrt: {
1659       __ fsqrt(dest->as_double_reg(), value->as_double_reg());
1660       break;
1661     }
1662     case lir_abs: {
1663       __ fabs(dest->as_double_reg(), value->as_double_reg());
1664       break;
1665     }
1666     default: {
1667       ShouldNotReachHere();
1668       break;
1669     }
1670   }
1671 }
1672 
1673 
1674 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
1675   if (right->is_constant()) { // see do_LogicOp
1676     long uimm;
1677     Register d, l;
1678     if (dest->is_single_cpu()) {
1679       uimm = right->as_constant_ptr()->as_jint();
1680       d = dest->as_register();
1681       l = left->as_register();
1682     } else {
1683       uimm = right->as_constant_ptr()->as_jlong();
1684       d = dest->as_register_lo();
1685       l = left->as_register_lo();
1686     }
1687     long uimms  = (unsigned long)uimm >> 16,
1688          uimmss = (unsigned long)uimm >> 32;
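    // uimms is the constant shifted down by one halfword, uimmss by two.
    // Example: for uimm = 0x00FF0000, uimmss = 0 and the low halfword is 0,
    // so lir_logic_or is emitted as a single oris(d, l, 0xFF).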
1689 
1690     switch (code) {
1691       case lir_logic_and:
1692         if (uimmss != 0 || (uimms != 0 && (uimm & 0xFFFF) != 0) || is_power_of_2_long(uimm)) {
1693           __ andi(d, l, uimm); // special cases
1694         } else if (uimms != 0) { __ andis_(d, l, uimms); }
1695         else { __ andi_(d, l, uimm); }
1696         break;
1697 
1698       case lir_logic_or:
1699         if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ oris(d, l, uimms); }
1700         else { __ ori(d, l, uimm); }
1701         break;
1702 
1703       case lir_logic_xor:
1704         if (uimm == -1) { __ nand(d, l, l); } // special case
1705         else if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ xoris(d, l, uimms); }
1706         else { __ xori(d, l, uimm); }
1707         break;
1708 
1709       default: ShouldNotReachHere();
1710     }
1711   } else {
1712     assert(right->is_register(), "right should be in register");
1713 
1714     if (dest->is_single_cpu()) {
1715       switch (code) {
1716         case lir_logic_and: __ andr(dest->as_register(), left->as_register(), right->as_register()); break;
1717         case lir_logic_or:  __ orr (dest->as_register(), left->as_register(), right->as_register()); break;
1718         case lir_logic_xor: __ xorr(dest->as_register(), left->as_register(), right->as_register()); break;
1719         default: ShouldNotReachHere();
1720       }
1721     } else {
1722       Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
1723                                                                         left->as_register_lo();
1724       Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
1725                                                                           right->as_register_lo();
1726 
1727       switch (code) {
1728         case lir_logic_and: __ andr(dest->as_register_lo(), l, r); break;
1729         case lir_logic_or:  __ orr (dest->as_register_lo(), l, r); break;
1730         case lir_logic_xor: __ xorr(dest->as_register_lo(), l, r); break;
1731         default: ShouldNotReachHere();
1732       }
1733     }
1734   }
1735 }
1736 
1737 
1738 int LIR_Assembler::shift_amount(BasicType t) {
1739   int elem_size = type2aelembytes(t);
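  // Return log2 of the element size, e.g. T_SHORT (2 bytes) -> 1, so that
  // array indices can be scaled with a single shift.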
1740   switch (elem_size) {
1741     case 1 : return 0;
1742     case 2 : return 1;
1743     case 4 : return 2;
1744     case 8 : return 3;
1745   }
1746   ShouldNotReachHere();
1747   return -1;
1748 }
1749 
1750 
1751 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
1752   info->add_register_oop(exceptionOop);
1753 
1754   // Reuse the debug info from the safepoint poll for the throw op itself.
1755   address pc_for_athrow = __ pc();
1756   int pc_for_athrow_offset = __ offset();
1757   //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
1758   //__ relocate(rspec);
1759   //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0);
1760   __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
1761   add_call_info(pc_for_athrow_offset, info); // for exception handler
1762 
1763   address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id
1764                                                                    : Runtime1::handle_exception_nofpu_id);
1765   //__ load_const_optimized(R0, stub);
1766   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
1767   __ mtctr(R0);
1768   __ bctr();
1769 }
1770 
1771 
1772 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
1773   // Note: Not used with EnableDebuggingOnDemand.
1774   assert(exceptionOop->as_register() == R3, "should match");
1775   __ b(_unwind_handler_entry);
1776 }
1777 
1778 
1779 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
1780   Register src = op->src()->as_register();
1781   Register dst = op->dst()->as_register();
1782   Register src_pos = op->src_pos()->as_register();
1783   Register dst_pos = op->dst_pos()->as_register();
1784   Register length  = op->length()->as_register();
1785   Register tmp = op->tmp()->as_register();
1786   Register tmp2 = R0;
1787 
1788   int flags = op->flags();
1789   ciArrayKlass* default_type = op->expected_type();
1790   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
1791   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
1792 
1793   // Set up the arraycopy stub information.
1794   ArrayCopyStub* stub = op->stub();
1795   const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame.
1796 
  // Always use the stub if no type information is available. It's OK if
  // the known type isn't loaded, since the code sanity-checks in debug
  // mode, and the type isn't required when we know the exact type.
  // Also check that the type is an array type.
1801   if (op->expected_type() == NULL) {
1802     assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
1803            length->is_nonvolatile(), "must preserve");
    // 3 parms are int; sign-extend them to long.
1805     __ mr(R3_ARG1, src);
1806     __ extsw(R4_ARG2, src_pos);
1807     __ mr(R5_ARG3, dst);
1808     __ extsw(R6_ARG4, dst_pos);
1809     __ extsw(R7_ARG5, length);
1810     address copyfunc_addr = StubRoutines::generic_arraycopy();
1811 
1812     if (copyfunc_addr == NULL) { // Use C version if stub was not generated.
1813       address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
1814       __ call_c_with_frame_resize(entry, frame_resize);
1815     } else {
1816 #ifndef PRODUCT
1817       if (PrintC1Statistics) {
1818         address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
1819         int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
1820         __ lwz(R11_scratch1, simm16_offs, tmp);
1821         __ addi(R11_scratch1, R11_scratch1, 1);
1822         __ stw(R11_scratch1, simm16_offs, tmp);
1823       }
1824 #endif
1825       __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);
1826 
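      // The generic stub returns 0 on success, or ~(elements copied) if it
      // bailed out after a partial copy. tmp = ~R3_RET recovers the copied
      // count; adjust length and positions so the slow-path stub can finish
      // the remaining copy. (On full success the adjusted values are unused.)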
1827       __ nand(tmp, R3_RET, R3_RET);
1828       __ subf(length, tmp, length);
1829       __ add(src_pos, tmp, src_pos);
1830       __ add(dst_pos, tmp, dst_pos);
1831     }
1832 
1833     __ cmpwi(CCR0, R3_RET, 0);
1834     __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
1835     __ bind(*stub->continuation());
1836     return;
1837   }
1838 
1839   assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
1840   Label cont, slow, copyfunc;
1841 
1842   bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check |
1843                                         LIR_OpArrayCopy::dst_null_check |
1844                                         LIR_OpArrayCopy::src_pos_positive_check |
1845                                         LIR_OpArrayCopy::dst_pos_positive_check |
1846                                         LIR_OpArrayCopy::length_positive_check);
1847 
1848   // Use only one conditional branch for simple checks.
1849   if (simple_check_flag_set) {
1850     ConditionRegister combined_check = CCR1, tmp_check = CCR1;
1851 
1852     // Make sure src and dst are non-null.
1853     if (flags & LIR_OpArrayCopy::src_null_check) {
1854       __ cmpdi(combined_check, src, 0);
1855       tmp_check = CCR0;
1856     }
1857 
1858     if (flags & LIR_OpArrayCopy::dst_null_check) {
1859       __ cmpdi(tmp_check, dst, 0);
1860       if (tmp_check != combined_check) {
1861         __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
1862       }
1863       tmp_check = CCR0;
1864     }
1865 
    // If none of the null checks above used combined_check, clear its eq bit.
1867     if (tmp_check == combined_check) {
1868       __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
1869       tmp_check = CCR0;
1870     }
1871 
1872     if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
1873       // Test src_pos register.
1874       __ cmpwi(tmp_check, src_pos, 0);
1875       __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
1876     }
1877 
1878     if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
1879       // Test dst_pos register.
1880       __ cmpwi(tmp_check, dst_pos, 0);
1881       __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
1882     }
1883 
1884     if (flags & LIR_OpArrayCopy::length_positive_check) {
1885       // Make sure length isn't negative.
1886       __ cmpwi(tmp_check, length, 0);
1887       __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
1888     }
1889 
1890     __ beq(combined_check, slow);
1891   }
1892 
  // The upper 32 bits must be zero.
1894   __ extsw(length, length);
1895 
1896   __ extsw(src_pos, src_pos);
1897   if (flags & LIR_OpArrayCopy::src_range_check) {
1898     __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
1899     __ add(tmp, length, src_pos);
1900     __ cmpld(CCR0, tmp2, tmp);
1901     __ ble(CCR0, slow);
1902   }
1903 
1904   __ extsw(dst_pos, dst_pos);
1905   if (flags & LIR_OpArrayCopy::dst_range_check) {
1906     __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
1907     __ add(tmp, length, dst_pos);
1908     __ cmpld(CCR0, tmp2, tmp);
1909     __ ble(CCR0, slow);
1910   }
1911 
1912   int shift = shift_amount(basic_type);
1913 
1914   if (!(flags & LIR_OpArrayCopy::type_check)) {
1915     __ b(cont);
1916   } else {
    // We don't know whether the array types are compatible.
1918     if (basic_type != T_OBJECT) {
1919       // Simple test for basic type arrays.
1920       if (UseCompressedClassPointers) {
        // No need to decode: comparing the compressed klass pointers is sufficient.
1922         __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src);
1923         __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
1924         __ cmpw(CCR0, tmp, tmp2);
1925       } else {
1926         __ ld(tmp, oopDesc::klass_offset_in_bytes(), src);
1927         __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
1928         __ cmpd(CCR0, tmp, tmp2);
1929       }
1930       __ beq(CCR0, cont);
1931     } else {
      // For object arrays, if src is a subclass of dst then we can
      // safely do the copy.
1934       address copyfunc_addr = StubRoutines::checkcast_arraycopy();
1935 
1936       const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
1937       assert_different_registers(tmp, tmp2, sub_klass, super_klass);
1938 
1939       __ load_klass(sub_klass, src);
1940       __ load_klass(super_klass, dst);
1941 
1942       __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
1943                                        &cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);
1944 
1945       address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
1946       //__ load_const_optimized(tmp, slow_stc, tmp2);
1947       __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
1948       __ mtctr(tmp);
1949       __ bctrl(); // sets CR0
1950       __ beq(CCR0, cont);
1951 
1952       if (copyfunc_addr != NULL) { // Use stub if available.
1953         __ bind(copyfunc);
        // Src is not a subclass of dst, so we have to do a
        // per-element check.
1956         int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
1957         if ((flags & mask) != mask) {
1958           assert(flags & mask, "one of the two should be known to be an object array");
1959 
1960           if (!(flags & LIR_OpArrayCopy::src_objarray)) {
1961             __ load_klass(tmp, src);
1962           } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
1963             __ load_klass(tmp, dst);
1964           }
1965 
1966           __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
1967 
1968           jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
1969           __ load_const_optimized(tmp, objArray_lh);
1970           __ cmpw(CCR0, tmp, tmp2);
1971           __ bne(CCR0, slow);
1972         }
1973 
1974         Register src_ptr = R3_ARG1;
1975         Register dst_ptr = R4_ARG2;
1976         Register len     = R5_ARG3;
1977         Register chk_off = R6_ARG4;
1978         Register super_k = R7_ARG5;
1979 
1980         __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
1981         __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
1982         if (shift == 0) {
1983           __ add(src_ptr, src_pos, src_ptr);
1984           __ add(dst_ptr, dst_pos, dst_ptr);
1985         } else {
1986           __ sldi(tmp, src_pos, shift);
1987           __ sldi(tmp2, dst_pos, shift);
1988           __ add(src_ptr, tmp, src_ptr);
1989           __ add(dst_ptr, tmp2, dst_ptr);
1990         }
1991 
1992         __ load_klass(tmp, dst);
1993         __ mr(len, length);
1994 
1995         int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
1996         __ ld(super_k, ek_offset, tmp);
1997 
1998         int sco_offset = in_bytes(Klass::super_check_offset_offset());
1999         __ lwz(chk_off, sco_offset, super_k);
2000 
2001         __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);
2002 
2003 #ifndef PRODUCT
2004         if (PrintC1Statistics) {
2005           Label failed;
2006           __ cmpwi(CCR0, R3_RET, 0);
2007           __ bne(CCR0, failed);
2008           address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
2009           int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2010           __ lwz(R11_scratch1, simm16_offs, tmp);
2011           __ addi(R11_scratch1, R11_scratch1, 1);
2012           __ stw(R11_scratch1, simm16_offs, tmp);
2013           __ bind(failed);
2014         }
2015 #endif
2016 
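        // As with the generic stub: R3_RET is 0 on success, or ~(elements
        // copied) if an element failed the type check; tmp = ~R3_RET is the
        // count already copied, used for the partial-copy adjustment below.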
2017         __ nand(tmp, R3_RET, R3_RET);
2018         __ cmpwi(CCR0, R3_RET, 0);
2019         __ beq(CCR0, *stub->continuation());
2020 
2021 #ifndef PRODUCT
2022         if (PrintC1Statistics) {
2023           address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt;
2024           int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2025           __ lwz(R11_scratch1, simm16_offs, tmp);
2026           __ addi(R11_scratch1, R11_scratch1, 1);
2027           __ stw(R11_scratch1, simm16_offs, tmp);
2028         }
2029 #endif
2030 
2031         __ subf(length, tmp, length);
2032         __ add(src_pos, tmp, src_pos);
2033         __ add(dst_pos, tmp, dst_pos);
2034       }
2035     }
2036   }
2037   __ bind(slow);
2038   __ b(*stub->entry());
2039   __ bind(cont);
2040 
2041 #ifdef ASSERT
2042   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2043     // Sanity check the known type with the incoming class. For the
2044     // primitive case the types must match exactly with src.klass and
2045     // dst.klass each exactly matching the default type. For the
2046     // object array case, if no type check is needed then either the
2047     // dst type is exactly the expected type and the src type is a
2048     // subtype which we can't check or src is the same array as dst
2049     // but not necessarily exactly of type default_type.
2050     Label known_ok, halt;
2051     metadata2reg(op->expected_type()->constant_encoding(), tmp);
2052     if (UseCompressedClassPointers) {
2053       // Tmp holds the default type. It currently comes uncompressed after the
2054       // load of a constant, so encode it.
2055       __ encode_klass_not_null(tmp);
      // Load the raw narrow value of the dst klass, since we compare the
      // compressed values directly.
2058       __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
2059       __ cmpw(CCR0, tmp, tmp2);
2060       if (basic_type != T_OBJECT) {
2061         __ bne(CCR0, halt);
2062         // Load the raw value of the src klass.
2063         __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src);
2064         __ cmpw(CCR0, tmp, tmp2);
2065         __ beq(CCR0, known_ok);
2066       } else {
2067         __ beq(CCR0, known_ok);
2068         __ cmpw(CCR0, src, dst);
2069         __ beq(CCR0, known_ok);
2070       }
2071     } else {
2072       __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
2073       __ cmpd(CCR0, tmp, tmp2);
2074       if (basic_type != T_OBJECT) {
2075         __ bne(CCR0, halt);
2076         // Load the raw value of the src klass.
2077         __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src);
2078         __ cmpd(CCR0, tmp, tmp2);
2079         __ beq(CCR0, known_ok);
2080       } else {
2081         __ beq(CCR0, known_ok);
2082         __ cmpd(CCR0, src, dst);
2083         __ beq(CCR0, known_ok);
2084       }
2085     }
2086     __ bind(halt);
2087     __ stop("incorrect type information in arraycopy");
2088     __ bind(known_ok);
2089   }
2090 #endif
2091 
2092 #ifndef PRODUCT
2093   if (PrintC1Statistics) {
2094     address counter = Runtime1::arraycopy_count_address(basic_type);
2095     int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
2096     __ lwz(R11_scratch1, simm16_offs, tmp);
2097     __ addi(R11_scratch1, R11_scratch1, 1);
2098     __ stw(R11_scratch1, simm16_offs, tmp);
2099   }
2100 #endif
2101 
2102   Register src_ptr = R3_ARG1;
2103   Register dst_ptr = R4_ARG2;
2104   Register len     = R5_ARG3;
2105 
2106   __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
2107   __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
2108   if (shift == 0) {
2109     __ add(src_ptr, src_pos, src_ptr);
2110     __ add(dst_ptr, dst_pos, dst_ptr);
2111   } else {
2112     __ sldi(tmp, src_pos, shift);
2113     __ sldi(tmp2, dst_pos, shift);
2114     __ add(src_ptr, tmp, src_ptr);
2115     __ add(dst_ptr, tmp2, dst_ptr);
2116   }
2117 
2118   bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
2119   bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
2120   const char *name;
2121   address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);
2122 
  // Arraycopy stubs take the length in number of elements, so don't scale it.
2124   __ mr(len, length);
2125   __ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0);
2126 
2127   __ bind(*stub->continuation());
2128 }
2129 
2130 
2131 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
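  // Java shift semantics use only the low 5 bits (int) resp. 6 bits (long)
  // of the shift count; rldicl masks the count accordingly.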
2132   if (dest->is_single_cpu()) {
2133     __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5);
2134 #ifdef _LP64
2135     if (left->type() == T_OBJECT) {
2136       switch (code) {
2137         case lir_shl:  __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break;
2138         case lir_shr:  __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break;
2139         case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break;
2140         default: ShouldNotReachHere();
2141       }
2142     } else
2143 #endif
2144       switch (code) {
2145         case lir_shl:  __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break;
2146         case lir_shr:  __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break;
2147         case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break;
2148         default: ShouldNotReachHere();
2149       }
2150   } else {
2151     __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6);
2152     switch (code) {
2153       case lir_shl:  __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
2154       case lir_shr:  __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
2155       case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
2156       default: ShouldNotReachHere();
2157     }
2158   }
2159 }
2160 
2161 
2162 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2163 #ifdef _LP64
2164   if (left->type() == T_OBJECT) {
    count = count & 63;  // Shift count is modulo 64; shouldn't exceed the bit width of intptr_t.
2166     if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); }
2167     else {
2168       switch (code) {
2169         case lir_shl:  __ sldi(dest->as_register_lo(), left->as_register(), count); break;
2170         case lir_shr:  __ sradi(dest->as_register_lo(), left->as_register(), count); break;
2171         case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break;
2172         default: ShouldNotReachHere();
2173       }
2174     }
2175     return;
2176   }
2177 #endif
2178 
2179   if (dest->is_single_cpu()) {
2180     count = count & 0x1F; // Java spec
2181     if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); }
2182     else {
2183       switch (code) {
2184         case lir_shl: __ slwi(dest->as_register(), left->as_register(), count); break;
2185         case lir_shr:  __ srawi(dest->as_register(), left->as_register(), count); break;
2186         case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break;
2187         default: ShouldNotReachHere();
2188       }
2189     }
2190   } else if (dest->is_double_cpu()) {
2191     count = count & 63; // Java spec
2192     if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); }
2193     else {
2194       switch (code) {
2195         case lir_shl:  __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
2196         case lir_shr:  __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
2197         case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
2198         default: ShouldNotReachHere();
2199       }
2200     }
2201   } else {
2202     ShouldNotReachHere();
2203   }
2204 }
2205 
2206 
2207 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2208   if (op->init_check()) {
2209     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2210       explicit_null_check(op->klass()->as_register(), op->stub()->info());
2211     } else {
2212       add_debug_info_for_null_check_here(op->stub()->info());
2213     }
2214     __ lbz(op->tmp1()->as_register(),
2215            in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
2216     __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
2217     __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
2218   }
2219   __ allocate_object(op->obj()->as_register(),
2220                      op->tmp1()->as_register(),
2221                      op->tmp2()->as_register(),
2222                      op->tmp3()->as_register(),
2223                      op->header_size(),
2224                      op->object_size(),
2225                      op->klass()->as_register(),
2226                      *op->stub()->entry());
2227 
2228   __ bind(*op->stub()->continuation());
2229   __ verify_oop(op->obj()->as_register());
2230 }
2231 
2232 
2233 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2234   LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
2235   if (UseSlowPath ||
2236       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
2237       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
2238     __ b(*op->stub()->entry());
2239   } else {
2240     __ allocate_array(op->obj()->as_register(),
2241                       op->len()->as_register(),
2242                       op->tmp1()->as_register(),
2243                       op->tmp2()->as_register(),
2244                       op->tmp3()->as_register(),
2245                       arrayOopDesc::header_size(op->type()),
2246                       type2aelembytes(op->type()),
2247                       op->klass()->as_register(),
2248                       *op->stub()->entry());
2249   }
2250   __ bind(*op->stub()->continuation());
2251 }
2252 
2253 
2254 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
2255                                         ciMethodData *md, ciProfileData *data,
2256                                         Register recv, Register tmp1, Label* update_done) {
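  // Two-pass search over the ReceiverTypeData rows: first try to match
  // recv against an already recorded receiver and bump its counter,
  // otherwise claim the first empty row for it.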
2257   uint i;
2258   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2259     Label next_test;
2260     // See if the receiver is receiver[n].
2261     __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
2262     __ verify_klass_ptr(tmp1);
2263     __ cmpd(CCR0, recv, tmp1);
2264     __ bne(CCR0, next_test);
2265 
2266     __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2267     __ addi(tmp1, tmp1, DataLayout::counter_increment);
2268     __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2269     __ b(*update_done);
2270 
2271     __ bind(next_test);
2272   }
2273 
2274   // Didn't find receiver; find next empty slot and fill it in.
2275   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2276     Label next_test;
2277     __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
2278     __ cmpdi(CCR0, tmp1, 0);
2279     __ bne(CCR0, next_test);
2280     __ li(tmp1, DataLayout::counter_increment);
2281     __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
2282     __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2283     __ b(*update_done);
2284 
2285     __ bind(next_test);
2286   }
2287 }
2288 
2289 
2290 void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
2291                                     ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
2292   md = method->method_data_or_null();
2293   assert(md != NULL, "Sanity");
2294   data = md->bci_to_data(bci);
2295   assert(data != NULL,       "need data for checkcast");
2296   assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
2297   if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2298     // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16 displacements to reference the slots of the data.
2300     mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2301   }
2302 }
2303 
2304 
2305 void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
2306   Register obj = op->object()->as_register();
2307   Register k_RInfo = op->tmp1()->as_register();
2308   Register klass_RInfo = op->tmp2()->as_register();
2309   Register Rtmp1 = op->tmp3()->as_register();
2310   Register dst = op->result_opr()->as_register();
2311   ciKlass* k = op->klass();
2312   bool should_profile = op->should_profile();
2313   bool move_obj_to_dst = (op->code() == lir_checkcast);
  // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be the same register as one of the temps.
2315   bool reg_conflict = (obj == k_RInfo || obj == klass_RInfo || obj == Rtmp1);
2316   bool restore_obj = move_obj_to_dst && reg_conflict;
2317 
2318   __ cmpdi(CCR0, obj, 0);
2319   if (move_obj_to_dst || reg_conflict) {
2320     __ mr_if_needed(dst, obj);
2321     if (reg_conflict) { obj = dst; }
2322   }
2323 
2324   ciMethodData* md;
2325   ciProfileData* data;
2326   int mdo_offset_bias = 0;
2327   if (should_profile) {
2328     ciMethod* method = op->profiled_method();
2329     assert(method != NULL, "Should have method");
2330     setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2331 
2332     Register mdo      = k_RInfo;
2333     Register data_val = Rtmp1;
2334     Label not_null;
2335     __ bne(CCR0, not_null);
2336     metadata2reg(md->constant_encoding(), mdo);
2337     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2338     __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2339     __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2340     __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2341     __ b(*obj_is_null);
2342     __ bind(not_null);
2343   } else {
2344     __ beq(CCR0, *obj_is_null);
2345   }
2346 
  // Get the object's class.
2348   __ load_klass(klass_RInfo, obj);
2349 
2350   if (k->is_loaded()) {
2351     metadata2reg(k->constant_encoding(), k_RInfo);
2352   } else {
2353     klass2reg_with_patching(k_RInfo, op->info_for_patch());
2354   }
2355 
2356   Label profile_cast_failure, failure_restore_obj, profile_cast_success;
2357   Label *failure_target = should_profile ? &profile_cast_failure : failure;
2358   Label *success_target = should_profile ? &profile_cast_success : success;
2359 
2360   if (op->fast_check()) {
2361     assert_different_registers(klass_RInfo, k_RInfo);
2362     __ cmpd(CCR0, k_RInfo, klass_RInfo);
2363     if (should_profile) {
2364       __ bne(CCR0, *failure_target);
2365       // Fall through to success case.
2366     } else {
2367       __ beq(CCR0, *success);
2368       // Fall through to failure case.
2369     }
2370   } else {
2371     bool need_slow_path = true;
2372     if (k->is_loaded()) {
2373       if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
2374         need_slow_path = false;
2375       }
2376       // Perform the fast part of the checking logic.
2377       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
2378                                        failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
2379     } else {
2380       // Perform the fast part of the checking logic.
2381       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
2382     }
2383     if (!need_slow_path) {
2384       if (!should_profile) { __ b(*success); }
2385     } else {
2386       // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2387       address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
2388       //__ load_const_optimized(Rtmp1, entry, R0);
2389       __ calculate_address_from_global_toc(Rtmp1, entry, true, true, false);
2390       __ mtctr(Rtmp1);
2391       __ bctrl(); // sets CR0
2392       if (should_profile) {
2393         __ bne(CCR0, *failure_target);
2394         // Fall through to success case.
2395       } else {
2396         __ beq(CCR0, *success);
2397         // Fall through to failure case.
2398       }
2399     }
2400   }
2401 
2402   if (should_profile) {
2403     Register mdo = k_RInfo, recv = klass_RInfo;
2404     assert_different_registers(mdo, recv, Rtmp1);
2405     __ bind(profile_cast_success);
2406     metadata2reg(md->constant_encoding(), mdo);
2407     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2408     type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success);
2409     __ b(*success);
2410 
2411     // Cast failure case.
2412     __ bind(profile_cast_failure);
2413     metadata2reg(md->constant_encoding(), mdo);
2414     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2415     __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2416     __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
2417     __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2418   }
2419 
2420   __ bind(*failure);
2421 
2422   if (restore_obj) {
2423     __ mr(op->object()->as_register(), dst);
2424     // Fall through to failure case.
2425   }
2426 }
2427 
2428 
2429 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2430   LIR_Code code = op->code();
2431   if (code == lir_store_check) {
2432     Register value = op->object()->as_register();
2433     Register array = op->array()->as_register();
2434     Register k_RInfo = op->tmp1()->as_register();
2435     Register klass_RInfo = op->tmp2()->as_register();
2436     Register Rtmp1 = op->tmp3()->as_register();
2437     bool should_profile = op->should_profile();
2438 
2439     __ verify_oop(value);
2440     CodeStub* stub = op->stub();
2441     // Check if it needs to be profiled.
2442     ciMethodData* md;
2443     ciProfileData* data;
2444     int mdo_offset_bias = 0;
2445     if (should_profile) {
2446       ciMethod* method = op->profiled_method();
2447       assert(method != NULL, "Should have method");
2448       setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2449     }
2450     Label profile_cast_success, failure, done;
2451     Label *success_target = should_profile ? &profile_cast_success : &done;
2452 
2453     __ cmpdi(CCR0, value, 0);
2454     if (should_profile) {
2455       Label not_null;
2456       __ bne(CCR0, not_null);
2457       Register mdo      = k_RInfo;
2458       Register data_val = Rtmp1;
2459       metadata2reg(md->constant_encoding(), mdo);
2460       __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2461       __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2462       __ ori(data_val, data_val, BitData::null_seen_byte_constant());
2463       __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
2464       __ b(done);
2465       __ bind(not_null);
2466     } else {
2467       __ beq(CCR0, done);
2468     }
2469     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2470       explicit_null_check(array, op->info_for_exception());
2471     } else {
2472       add_debug_info_for_null_check_here(op->info_for_exception());
2473     }
2474     __ load_klass(k_RInfo, array);
2475     __ load_klass(klass_RInfo, value);
2476 
2477     // Get instance klass.
2478     __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo);
2479     // Perform the fast part of the checking logic.
2480     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL);
2481 
2482     // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
2483     const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
2484     //__ load_const_optimized(R0, slow_path);
2485     __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
2486     __ mtctr(R0);
2487     __ bctrl(); // sets CR0
2488     if (!should_profile) {
2489       __ beq(CCR0, done);
2490       __ bind(failure);
2491     } else {
2492       __ bne(CCR0, failure);
2493       // Fall through to the success case.
2494 
2495       Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
2496       assert_different_registers(value, mdo, recv, tmp1);
2497       __ bind(profile_cast_success);
2498       metadata2reg(md->constant_encoding(), mdo);
2499       __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2500       __ load_klass(recv, value);
2501       type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
2502       __ b(done);
2503 
2504       // Cast failure case.
2505       __ bind(failure);
2506       metadata2reg(md->constant_encoding(), mdo);
2507       __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2508       Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2509       __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2510       __ addi(tmp1, tmp1, -DataLayout::counter_increment);
2511       __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2512     }
2513     __ b(*stub->entry());
2514     __ bind(done);
2515 
2516   } else if (code == lir_checkcast) {
2517     Label success, failure;
2518     emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success); // Moves obj to dst.
2519     __ b(*op->stub()->entry());
2520     __ align(32, 12);
2521     __ bind(success);
2522   } else if (code == lir_instanceof) {
2523     Register dst = op->result_opr()->as_register();
2524     Label success, failure, done;
2525     emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
2526     __ li(dst, 0);
2527     __ b(done);
2528     __ align(32, 12);
2529     __ bind(success);
2530     __ li(dst, 1);
2531     __ bind(done);
2532   } else {
2533     ShouldNotReachHere();
2534   }
2535 }
2536 
2537 
2538 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2539   Register addr = op->addr()->as_pointer_register();
2540   Register cmp_value = noreg, new_value = noreg;
2541   bool is_64bit = false;
2542 
2543   if (op->code() == lir_cas_long) {
2544     cmp_value = op->cmp_value()->as_register_lo();
2545     new_value = op->new_value()->as_register_lo();
2546     is_64bit = true;
2547   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2548     cmp_value = op->cmp_value()->as_register();
2549     new_value = op->new_value()->as_register();
2550     if (op->code() == lir_cas_obj) {
2551       if (UseCompressedOops) {
2552         Register t1 = op->tmp1()->as_register();
2553         Register t2 = op->tmp2()->as_register();
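        // With compressed oops the field holds a 32-bit narrow oop, so
        // compare and swap the encoded values.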
2554         cmp_value = __ encode_heap_oop(t1, cmp_value);
2555         new_value = __ encode_heap_oop(t2, new_value);
2556       } else {
2557         is_64bit = true;
2558       }
2559     }
2560   } else {
2561     Unimplemented();
2562   }
2563 
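  // Emit a compare-and-exchange sequence (load-reserve/store-conditional
  // loop) with a memory fence afterwards; the success condition is set
  // in BOOL_RESULT.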
2564   if (is_64bit) {
2565     __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
2566                 MacroAssembler::MemBarFenceAfter,
2567                 MacroAssembler::cmpxchgx_hint_atomic_update(),
2568                 noreg, NULL, /*check without ldarx first*/true);
2569   } else {
2570     __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
2571                 MacroAssembler::MemBarFenceAfter,
2572                 MacroAssembler::cmpxchgx_hint_atomic_update(),
2573                 noreg, /*check without ldarx first*/true);
2574   }
2575 }
2576 
2577 
2578 void LIR_Assembler::set_24bit_FPU() {
2579   Unimplemented();
2580 }
2581 
2582 void LIR_Assembler::reset_FPU() {
2583   Unimplemented();
2584 }
2585 
2586 
2587 void LIR_Assembler::breakpoint() {
2588   __ illtrap();
2589 }
2590 
2591 
2592 void LIR_Assembler::push(LIR_Opr opr) {
2593   Unimplemented();
2594 }
2595 
2596 void LIR_Assembler::pop(LIR_Opr opr) {
2597   Unimplemented();
2598 }
2599 
2600 
2601 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2602   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2603   Register dst = dst_opr->as_register();
2604   Register reg = mon_addr.base();
2605   int offset = mon_addr.disp();
2606   // Compute pointer to BasicLock.
2607   __ add_const_optimized(dst, reg, offset);
2608 }
2609 
2610 
2611 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2612   Register obj = op->obj_opr()->as_register();
2613   Register hdr = op->hdr_opr()->as_register();
2614   Register lock = op->lock_opr()->as_register();
2615 
2616   // Obj may not be an oop.
2617   if (op->code() == lir_lock) {
2618     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2619     if (UseFastLocking) {
2620       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2621       // Add debug info for NullPointerException only if one is possible.
2622       if (op->info() != NULL) {
2623         if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2624           explicit_null_check(obj, op->info());
2625         } else {
2626           add_debug_info_for_null_check_here(op->info());
2627         }
2628       }
2629       __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2630     } else {
      // Always do slow locking.
      // Note: The slow locking code could be inlined here; however, if we
      //       use slow locking, speed doesn't matter anyway, and this
      //       solution is simpler and requires less duplicated code.
      //       Additionally, the slow locking code is the same in either
      //       case, which simplifies debugging.
2637       __ b(*op->stub()->entry());
2638     }
2639   } else {
2640     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2641     if (UseFastLocking) {
2642       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2643       __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2644     } else {
      // Always do slow unlocking.
      // Note: The slow unlocking code could be inlined here; however, if we
      //       use slow unlocking, speed doesn't matter anyway, and this
      //       solution is simpler and requires less duplicated code.
      //       Additionally, the slow unlocking code is the same in either
      //       case, which simplifies debugging.
2651       __ b(*op->stub()->entry());
2652     }
2653   }
2654   __ bind(*op->stub()->continuation());
2655 }
2656 
2657 
2658 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2659   ciMethod* method = op->profiled_method();
2660   int bci          = op->profiled_bci();
2661   ciMethod* callee = op->profiled_callee();
2662 
2663   // Update counter for all call types.
2664   ciMethodData* md = method->method_data_or_null();
2665   assert(md != NULL, "Sanity");
2666   ciProfileData* data = md->bci_to_data(bci);
2667   assert(data->is_CounterData(), "need CounterData for calls");
2668   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2669   Register mdo = op->mdo()->as_register();
2670 #ifdef _LP64
2671   assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
2672   Register tmp1 = op->tmp1()->as_register_lo();
2673 #else
2674   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
2675   Register tmp1 = op->tmp1()->as_register();
2676 #endif
2677   metadata2reg(md->constant_encoding(), mdo);
2678   int mdo_offset_bias = 0;
2679   if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
2680                             data->size_in_bytes())) {
2681     // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16 displacements to reference the slots of the data.
2683     mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2684     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2685   }
2686 
2687   Bytecodes::Code bc = method->java_code_at_bci(bci);
2688   const bool callee_is_static = callee->is_loaded() && callee->is_static();
2689   // Perform additional virtual call profiling for invokevirtual and
2690   // invokeinterface bytecodes.
2691   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
2692       !callee_is_static &&  // Required for optimized MH invokes.
2693       C1ProfileVirtualCalls) {
2694     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2695     Register recv = op->recv()->as_register();
2696     assert_different_registers(mdo, tmp1, recv);
2697     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2698     ciKlass* known_klass = op->known_holder();
2699     if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
2700       // We know the type that will be seen at this call site; we can
2701       // statically update the MethodData* rather than needing to do
2702       // dynamic tests on the receiver type.
2703 
2704       // NOTE: we should probably put a lock around this search to
2705       // avoid collisions by concurrent compilations.
2706       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2707       uint i;
2708       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2709         ciKlass* receiver = vc_data->receiver(i);
2710         if (known_klass->equals(receiver)) {
2711           __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2712           __ addi(tmp1, tmp1, DataLayout::counter_increment);
2713           __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2714           return;
2715         }
2716       }
2717 
2718       // Receiver type not found in profile data; select an empty slot.
2719 
2720       // Note that this is less efficient than it should be because it
2721       // always does a write to the receiver part of the
2722       // VirtualCallData rather than just the first time.
2723       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2724         ciKlass* receiver = vc_data->receiver(i);
2725         if (receiver == NULL) {
2726           metadata2reg(known_klass->constant_encoding(), tmp1);
2727           __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo);
2728 
2729           __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2730           __ addi(tmp1, tmp1, DataLayout::counter_increment);
2731           __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
2732           return;
2733         }
2734       }
2735     } else {
2736       __ load_klass(recv, recv);
2737       Label update_done;
2738       type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
2739       // Receiver did not match any saved receiver and there is no empty row for it.
2740       // Increment total counter to indicate polymorphic case.
2741       __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2742       __ addi(tmp1, tmp1, DataLayout::counter_increment);
2743       __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2744 
2745       __ bind(update_done);
2746     }
2747   } else {
2748     // Static call
2749     __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2750     __ addi(tmp1, tmp1, DataLayout::counter_increment);
2751     __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2752   }
2753 }
2754 
2755 
2756 void LIR_Assembler::align_backward_branch_target() {
  __ align(32, 12); // Insert up to 3 nops to align to a 32-byte boundary.
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  Unimplemented();
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert(left->is_double_cpu(), "must be a long");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  // Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
  if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) ||
      dest == Runtime1::entry_for(Runtime1::new_multi_array_id   )) {
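    // Materialize the stub entry TOC-relative (usually shorter than a full
    // 64-bit constant load) and call through CTR; stubs carry no function
    // descriptor, so the regular C calling sequence does not apply.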
    //__ load_const_optimized(R0, dest);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
    __ mtctr(R0);
    __ bctrl();
    assert(info != NULL, "sanity");
    add_call_info_here(info);
    return;
  }

  __ call_c_with_frame_resize(dest, /*no resizing*/ 0);
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotReachHere(); // Not needed on _LP64.
}

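// Memory barriers. On PPC64 these typically lower to 'sync' for the full
// fence and for StoreLoad, and to the cheaper 'lwsync' for acquire, release,
// and the remaining orderings (a sketch of the usual mapping; the exact
// instruction selection lives in the shared PPC (Macro)Assembler).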
void LIR_Assembler::membar() {
  __ fence();
}

void LIR_Assembler::membar_acquire() {
  __ acquire();
}

void LIR_Assembler::membar_release() {
  __ release();
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(Assembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() {
  __ membar(Assembler::LoadStore);
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::StoreLoad);
}


void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform");
  if (addr->index()->is_illegal()) {
    __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp());
  } else {
    assert(addr->disp() == 0, "can't have both: index and disp");
    __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register());
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotReachHere();
}


#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif


void LIR_Assembler::peephole(LIR_List* lir) {
  // Optimize instruction pairs before emitting.
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 1; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);

    // A pair of mutually-inverse register-register moves; the second one is redundant.
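    // Illustrative example (hypothetical registers):
    //   move R4 -> R5
    //   move R5 -> R4   // removed below: R4 already holds the value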
    if (op->code() == lir_move) {
      LIR_Opr in2  = ((LIR_Op1*)op)->in_opr(),
              res2 = ((LIR_Op1*)op)->result_opr();
      if (in2->is_register() && res2->is_register()) {
        LIR_Op* prev = inst->at(i - 1);
        if (prev && prev->code() == lir_move) {
          LIR_Opr in1  = ((LIR_Op1*)prev)->in_opr(),
                  res1 = ((LIR_Op1*)prev)->result_opr();
          if (in1->is_same_register(res2) && in2->is_same_register(res1)) {
            inst->remove_at(i);
          }
        }
      }
    }

  }
}


void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  const Register Rptr = src->as_pointer_register(),
                 Rtmp = tmp->as_register();
  Register Rco = noreg;
  if (UseCompressedOops && data->is_oop()) {
    Rco = __ encode_heap_oop(Rtmp, data->as_register());
  }

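  // The code below forms a classic load-reserve/store-conditional retry loop.
  // For lir_xadd on T_INT it amounts to (an illustrative sketch):
  //   Lretry: lwarx  Rold, Rptr        ; load old value, take reservation
  //           add    Rtmp, Rsrc, Rold
  //           stwcx. Rtmp, Rptr        ; store iff reservation still held
  //           bne    CCR0, Lretry      ; reservation lost -> retry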
  Label Lretry;
  __ bind(Lretry);

  if (data->type() == T_INT) {
    const Register Rold = dest->as_register(),
                   Rsrc = data->as_register();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stwcx_(Rtmp, Rptr);
    } else {
      __ stwcx_(Rsrc, Rptr);
    }
  } else if (data->is_oop()) {
    assert(code == lir_xchg, "only xchg is supported for oops");
    const Register Rold = dest->as_register();
    if (UseCompressedOops) {
      assert_different_registers(Rptr, Rold, Rco);
      __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stwcx_(Rco, Rptr);
    } else {
      const Register Robj = data->as_register();
      assert_different_registers(Rptr, Rold, Robj);
      __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stdcx_(Robj, Rptr);
    }
  } else if (data->type() == T_LONG) {
    const Register Rold = dest->as_register_lo(),
                   Rsrc = data->as_register_lo();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stdcx_(Rtmp, Rptr);
    } else {
      __ stdcx_(Rsrc, Rptr);
    }
  } else {
    ShouldNotReachHere();
  }

  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    __ bne_predict_not_taken(CCR0, Lretry);
  } else {
    __ bne(                  CCR0, Lretry);
  }

  if (UseCompressedOops && data->is_oop()) {
    __ decode_heap_oop(dest->as_register());
  }
}


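// Type profiling: each MDO type slot packs a klass pointer together with low
// flag bits. Conceptually, the update below implements (an illustrative
// C-level sketch; 'slot' and 'flags' stand in for the masked fields):
//
//   if (obj == NULL)                                 slot |= null_seen;
//   else if ((slot & type_klass_mask) == klass(obj)) /* seen before */ ;
//   else if (slot & type_unknown)                    /* already saturated */ ;
//   else if ((slot & type_mask) == 0)                slot = klass(obj) | flags;
//   else                                             slot |= type_unknown;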
void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label Lupdate, Ldo_update, Ldone;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
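  // do_null: null has not been ruled out, so the null_seen path is emitted;
  // do_update: the recorded type is neither saturated (unknown) nor already
  // exactly known, so the klass part of the slot may still need updating.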

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (do_null) {
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ cmpdi(CCR0, obj, 0);
      __ bne(CCR0, Lupdate);
      __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      __ ori(R0, R0, TypeEntries::null_seen);
      if (do_update) {
        __ b(Ldo_update);
      } else {
        __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      }
    } else {
      if (do_update) {
        __ cmpdi(CCR0, obj, 0);
        __ beq(CCR0, Ldone);
      }
    }
#ifdef ASSERT
  } else {
    __ cmpdi(CCR0, obj, 0);
    __ bne(CCR0, Lupdate);
    __ stop("unexpected null obj", 0x9652);
#endif
  }

  __ bind(Lupdate);
  if (do_update) {
    Label Lnext;
    const Register klass = R29_TOC; // kill and reload
    bool klass_reg_used = false;
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      klass_reg_used = true;
      __ load_klass(klass, obj);
      metadata2reg(exact_klass->constant_encoding(), R0);
      __ cmpd(CCR0, klass, R0);
      __ beq(CCR0, ok);
      __ stop("exact klass and actual klass differ", 0x8564);
      __ bind(ok);
    }
#endif

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        if (exact_klass != NULL) {
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
          metadata2reg(exact_klass->constant_encoding(), klass);
        } else {
          __ load_klass(klass, obj);
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // May kill obj.
        }

        // Like InterpreterMacroAssembler::profile_obj_type
        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        //beq(CCR1, do_nothing);

        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        //bne(CCR0, do_nothing);
        __ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
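        // I.e., the branch below is taken when the klass was seen before
        // (CCR1 eq) or the unknown bit is already set (CCR0 ne).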
        __ beq(CCR0, Lnext);

        if (TypeEntries::is_type_none(current_klass)) {
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
          __ beq(CCR0, Ldo_update); // First time here. Set profile type.
        }

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        __ bne(CCR0, Lnext);
      }

      // Different than before. Cannot keep accurate profile.
      __ ori(R0, tmp, TypeEntries::type_unknown);
    } else {
      // There's a single possible klass at this profile point.
      assert(exact_klass != NULL, "should be");
      __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

      if (TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        metadata2reg(exact_klass->constant_encoding(), klass);

        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        __ beq(CCR1, Lnext);
#ifdef ASSERT
        {
          Label ok;
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ beq(CCR0, ok); // First time here.

          __ stop("unexpected profiling mismatch", 0x7865);
          __ bind(ok);
        }
#endif
        // First time here. Set profile type.
        __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown. Nothing to do anymore.
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        __ bne(CCR0, Lnext);

        // Different than before. Cannot keep accurate profile.
        __ ori(R0, tmp, TypeEntries::type_unknown);
      }
    }

    __ bind(Ldo_update);
    __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

    __ bind(Lnext);
    if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit
  }
  __ bind(Ldone);
}


void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);
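
  // java.util.zip.CRC32 keeps the running CRC non-inverted, so complement on
  // entry and exit around the table-driven byte step (res temporarily holds
  // the table address, then receives the result). Equivalent C, as a sketch
  // in the usual zlib style:
  //   crc = ~crc;
  //   crc = table[(crc ^ val) & 0xff] ^ (crc >> 8);
  //   res = ~crc;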
  __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
  __ nand(crc, crc, crc); // crc = ~crc
  __ update_byte_crc32(crc, val, res);
  __ nand(res, crc, crc); // res = ~crc
}

#undef __