1 /*
   2  * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 # include "incls/_precompiled.incl"
  26 # include "incls/_c1_LIRAssembler_sparc.cpp.incl"
  27 
  28 #define __ _masm->
  29 
  30 
  31 //------------------------------------------------------------
  32 
  33 
  34 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  35   if (opr->is_constant()) {
  36     LIR_Const* constant = opr->as_constant_ptr();
  37     switch (constant->type()) {
  38       case T_INT: {
  39         jint value = constant->as_jint();
  40         return Assembler::is_simm13(value);
  41       }
  42 
  43       default:
  44         return false;
  45     }
  46   }
  47   return false;
  48 }
  49 
  50 
  51 bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  52   switch (op->code()) {
  53     case lir_null_check:
  54     return true;
  55 
  56 
  57     case lir_add:
  58     case lir_ushr:
  59     case lir_shr:
  60     case lir_shl:
  61       // integer shifts and adds are always one instruction
  62       return op->result_opr()->is_single_cpu();
  63 
  64 
  65     case lir_move: {
  66       LIR_Op1* op1 = op->as_Op1();
  67       LIR_Opr src = op1->in_opr();
  68       LIR_Opr dst = op1->result_opr();
  69 
  70       if (src == dst) {
  71         NEEDS_CLEANUP;
  72         // this works around a problem where moves with the same src and dst
  73         // end up in the delay slot and then the assembler swallows the mov
  74         // since it has no effect and then it complains because the delay slot
  75         // is empty.  returning false stops the optimizer from putting this in
  76         // the delay slot
  77         return false;
  78       }
  79 
  80       // don't put moves involving oops into the delay slot since the VerifyOops code
  81       // will make it much larger than a single instruction.
  82       if (VerifyOops) {
  83         return false;
  84       }
  85 
  86       if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
  87           ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
  88         return false;
  89       }
  90 
  91       if (dst->is_register()) {
  92         if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
  93           return !PatchALot;
  94         } else if (src->is_single_stack()) {
  95           return true;
  96         }
  97       }
  98 
  99       if (src->is_register()) {
 100         if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
 101           return !PatchALot;
 102         } else if (dst->is_single_stack()) {
 103           return true;
 104         }
 105       }
 106 
 107       if (dst->is_register() &&
 108           ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
 109            (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
 110         return true;
 111       }
 112 
 113       return false;
 114     }
 115 
 116     default:
 117       return false;
 118   }
 119   ShouldNotReachHere();
 120 }
 121 
 122 
 123 LIR_Opr LIR_Assembler::receiverOpr() {
 124   return FrameMap::O0_oop_opr;
 125 }
 126 
 127 
 128 LIR_Opr LIR_Assembler::incomingReceiverOpr() {
 129   return FrameMap::I0_oop_opr;
 130 }
 131 
 132 
 133 LIR_Opr LIR_Assembler::osrBufferPointer() {
 134   return FrameMap::I0_opr;
 135 }
 136 
 137 
 138 int LIR_Assembler::initial_frame_size_in_bytes() {
 139   return in_bytes(frame_map()->framesize_in_bytes());
 140 }
 141 
 142 
 143 // inline cache check: the inline cached class is in G5_inline_cache_reg(G5);
 144 // we fetch the class of the receiver (O0) and compare it with the cached class.
 145 // If they do not match we jump to slow case.
 146 int LIR_Assembler::check_icache() {
 147   int offset = __ offset();
 148   __ inline_cache_check(O0, G5_inline_cache_reg);
 149   return offset;
 150 }
 151 
 152 
 153 void LIR_Assembler::osr_entry() {
 154   // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
 155   //
 156   //   1. Create a new compiled activation.
 157   //   2. Initialize local variables in the compiled activation.  The expression stack must be empty
 158   //      at the osr_bci; it is not initialized.
 159   //   3. Jump to the continuation address in compiled code to resume execution.
 160 
 161   // OSR entry point
 162   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 163   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 164   ValueStack* entry_state = osr_entry->end()->state();
 165   int number_of_locks = entry_state->locks_size();
 166 
 167   // Create a frame for the compiled activation.
 168   __ build_frame(initial_frame_size_in_bytes());
 169 
 170   // OSR buffer is
 171   //
 172   // locals[nlocals-1..0]
 173   // monitors[number_of_locks-1..0]
 174   //
 175   // locals is a direct copy of the interpreter frame so in the osr buffer
 176   // so first slot in the local array is the last local from the interpreter
 177   // and last slot is local[0] (receiver) from the interpreter
 178   //
 179   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 180   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 181   // in the interpreter frame (the method lock if a sync method)
 182 
 183   // Initialize monitors in the compiled activation.
 184   //   I0: pointer to osr buffer
 185   //
 186   // All other registers are dead at this point and the locals will be
 187   // copied into place by code emitted in the IR.
 188 
 189   Register OSR_buf = osrBufferPointer()->as_register();
 190   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 191     int monitor_offset = BytesPerWord * method()->max_locals() +
 192       (2 * BytesPerWord) * (number_of_locks - 1);
 193     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 194     // the OSR buffer using 2 word entries: first the lock and then
 195     // the oop.
 196     for (int i = 0; i < number_of_locks; i++) {
 197       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 198 #ifdef ASSERT
 199       // verify the interpreter's monitor has a non-null object
 200       {
 201         Label L;
 202         __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
 203         __ cmp(G0, O7);
 204         __ br(Assembler::notEqual, false, Assembler::pt, L);
 205         __ delayed()->nop();
 206         __ stop("locked object is NULL");
 207         __ bind(L);
 208       }
 209 #endif // ASSERT
 210       // Copy the lock field into the compiled activation.
 211       __ ld_ptr(OSR_buf, slot_offset + 0, O7);
 212       __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
 213       __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
 214       __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
 215     }
 216   }
 217 }
 218 
 219 
 220 // Optimized Library calls
 221 // This is the fast version of java.lang.String.compare; it has not
 222 // OSR-entry and therefore, we generate a slow version for OSR's
 223 void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
 224   Register str0 = left->as_register();
 225   Register str1 = right->as_register();
 226 
 227   Label Ldone;
 228 
 229   Register result = dst->as_register();
 230   {
 231     // Get a pointer to the first character of string0 in tmp0 and get string0.count in str0
 232     // Get a pointer to the first character of string1 in tmp1 and get string1.count in str1
 233     // Also, get string0.count-string1.count in o7 and get the condition code set
 234     // Note: some instructions have been hoisted for better instruction scheduling
 235 
 236     Register tmp0 = L0;
 237     Register tmp1 = L1;
 238     Register tmp2 = L2;
 239 
 240     int  value_offset = java_lang_String:: value_offset_in_bytes(); // char array
 241     int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
 242     int  count_offset = java_lang_String:: count_offset_in_bytes();
 243 
 244     __ ld_ptr(str0, value_offset, tmp0);
 245     __ ld(str0, offset_offset, tmp2);
 246     __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
 247     __ ld(str0, count_offset, str0);
 248     __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
 249 
 250     // str1 may be null
 251     add_debug_info_for_null_check_here(info);
 252 
 253     __ ld_ptr(str1, value_offset, tmp1);
 254     __ add(tmp0, tmp2, tmp0);
 255 
 256     __ ld(str1, offset_offset, tmp2);
 257     __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
 258     __ ld(str1, count_offset, str1);
 259     __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
 260     __ subcc(str0, str1, O7);
 261     __ add(tmp1, tmp2, tmp1);
 262   }
 263 
 264   {
 265     // Compute the minimum of the string lengths, scale it and store it in limit
 266     Register count0 = I0;
 267     Register count1 = I1;
 268     Register limit  = L3;
 269 
 270     Label Lskip;
 271     __ sll(count0, exact_log2(sizeof(jchar)), limit);             // string0 is shorter
 272     __ br(Assembler::greater, true, Assembler::pt, Lskip);
 273     __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit);  // string1 is shorter
 274     __ bind(Lskip);
 275 
 276     // If either string is empty (or both of them) the result is the difference in lengths
 277     __ cmp(limit, 0);
 278     __ br(Assembler::equal, true, Assembler::pn, Ldone);
 279     __ delayed()->mov(O7, result);  // result is difference in lengths
 280   }
 281 
 282   {
 283     // Neither string is empty
 284     Label Lloop;
 285 
 286     Register base0 = L0;
 287     Register base1 = L1;
 288     Register chr0  = I0;
 289     Register chr1  = I1;
 290     Register limit = L3;
 291 
 292     // Shift base0 and base1 to the end of the arrays, negate limit
 293     __ add(base0, limit, base0);
 294     __ add(base1, limit, base1);
 295     __ neg(limit);  // limit = -min{string0.count, strin1.count}
 296 
 297     __ lduh(base0, limit, chr0);
 298     __ bind(Lloop);
 299     __ lduh(base1, limit, chr1);
 300     __ subcc(chr0, chr1, chr0);
 301     __ br(Assembler::notZero, false, Assembler::pn, Ldone);
 302     assert(chr0 == result, "result must be pre-placed");
 303     __ delayed()->inccc(limit, sizeof(jchar));
 304     __ br(Assembler::notZero, true, Assembler::pt, Lloop);
 305     __ delayed()->lduh(base0, limit, chr0);
 306   }
 307 
 308   // If strings are equal up to min length, return the length difference.
 309   __ mov(O7, result);
 310 
 311   // Otherwise, return the difference between the first mismatched chars.
 312   __ bind(Ldone);
 313 }
 314 
 315 
 316 // --------------------------------------------------------------------------------------------
 317 
 318 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
 319   if (!GenerateSynchronizationCode) return;
 320 
 321   Register obj_reg = obj_opr->as_register();
 322   Register lock_reg = lock_opr->as_register();
 323 
 324   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
 325   Register reg = mon_addr.base();
 326   int offset = mon_addr.disp();
 327   // compute pointer to BasicLock
 328   if (mon_addr.is_simm13()) {
 329     __ add(reg, offset, lock_reg);
 330   }
 331   else {
 332     __ set(offset, lock_reg);
 333     __ add(reg, lock_reg, lock_reg);
 334   }
 335   // unlock object
 336   MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
 337   // _slow_case_stubs->append(slow_case);
 338   // temporary fix: must be created after exceptionhandler, therefore as call stub
 339   _slow_case_stubs->append(slow_case);
 340   if (UseFastLocking) {
 341     // try inlined fast unlocking first, revert to slow locking if it fails
 342     // note: lock_reg points to the displaced header since the displaced header offset is 0!
 343     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
 344     __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
 345   } else {
 346     // always do slow unlocking
 347     // note: the slow unlocking code could be inlined here, however if we use
 348     //       slow unlocking, speed doesn't matter anyway and this solution is
 349     //       simpler and requires less duplicated code - additionally, the
 350     //       slow unlocking code is the same in either case which simplifies
 351     //       debugging
 352     __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
 353     __ delayed()->nop();
 354   }
 355   // done
 356   __ bind(*slow_case->continuation());
 357 }
 358 
 359 
 360 int LIR_Assembler::emit_exception_handler() {
 361   // if the last instruction is a call (typically to do a throw which
 362   // is coming at the end after block reordering) the return address
 363   // must still point into the code area in order to avoid assertion
 364   // failures when searching for the corresponding bci => add a nop
 365   // (was bug 5/14/1999 - gri)
 366   __ nop();
 367 
 368   // generate code for exception handler
 369   ciMethod* method = compilation()->method();
 370 
 371   address handler_base = __ start_a_stub(exception_handler_size);
 372 
 373   if (handler_base == NULL) {
 374     // not enough space left for the handler
 375     bailout("exception handler overflow");
 376     return -1;
 377   }
 378 
 379   int offset = code_offset();
 380 
 381   __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
 382   __ delayed()->nop();
 383   debug_only(__ stop("should have gone to the caller");)
 384   assert(code_offset() - offset <= exception_handler_size, "overflow");
 385   __ end_a_stub();
 386 
 387   return offset;
 388 }
 389 
 390 
 391 // Emit the code to remove the frame from the stack in the exception
 392 // unwind path.
 393 int LIR_Assembler::emit_unwind_handler() {
 394 #ifndef PRODUCT
 395   if (CommentedAssembly) {
 396     _masm->block_comment("Unwind handler");
 397   }
 398 #endif
 399 
 400   int offset = code_offset();
 401 
 402   // Fetch the exception from TLS and clear out exception related thread state
 403   __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), O0);
 404   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
 405   __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
 406 
 407   __ bind(_unwind_handler_entry);
 408   __ verify_not_null_oop(O0);
 409   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 410     __ mov(O0, I0);  // Preserve the exception
 411   }
 412 
 413   // Preform needed unlocking
 414   MonitorExitStub* stub = NULL;
 415   if (method()->is_synchronized()) {
 416     monitor_address(0, FrameMap::I1_opr);
 417     stub = new MonitorExitStub(FrameMap::I1_opr, true, 0);
 418     __ unlock_object(I3, I2, I1, *stub->entry());
 419     __ bind(*stub->continuation());
 420   }
 421 
 422   if (compilation()->env()->dtrace_method_probes()) {
 423     jobject2reg(method()->constant_encoding(), O0);
 424     __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), relocInfo::runtime_call_type);
 425     __ delayed()->nop();
 426   }
 427 
 428   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 429     __ mov(I0, O0);  // Restore the exception
 430   }
 431 
 432   // dispatch to the unwind logic
 433   __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
 434   __ delayed()->nop();
 435 
 436   // Emit the slow path assembly
 437   if (stub != NULL) {
 438     stub->emit_code(this);
 439   }
 440 
 441   return offset;
 442 }
 443 
 444 
 445 int LIR_Assembler::emit_deopt_handler() {
 446   // if the last instruction is a call (typically to do a throw which
 447   // is coming at the end after block reordering) the return address
 448   // must still point into the code area in order to avoid assertion
 449   // failures when searching for the corresponding bci => add a nop
 450   // (was bug 5/14/1999 - gri)
 451   __ nop();
 452 
 453   // generate code for deopt handler
 454   ciMethod* method = compilation()->method();
 455   address handler_base = __ start_a_stub(deopt_handler_size);
 456   if (handler_base == NULL) {
 457     // not enough space left for the handler
 458     bailout("deopt handler overflow");
 459     return -1;
 460   }
 461 
 462   int offset = code_offset();
 463   AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
 464   __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
 465   __ delayed()->nop();
 466   assert(code_offset() - offset <= deopt_handler_size, "overflow");
 467   debug_only(__ stop("should have gone to the caller");)
 468   __ end_a_stub();
 469 
 470   return offset;
 471 }
 472 
 473 
 474 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 475   if (o == NULL) {
 476     __ set(NULL_WORD, reg);
 477   } else {
 478     int oop_index = __ oop_recorder()->find_index(o);
 479     RelocationHolder rspec = oop_Relocation::spec(oop_index);
 480     __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
 481   }
 482 }
 483 
 484 
 485 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 486   // Allocate a new index in oop table to hold the oop once it's been patched
 487   int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
 488   PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);
 489 
 490   AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
 491   assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
 492   // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
 493   // NULL will be dynamically patched later and the patched value may be large.  We must
 494   // therefore generate the sethi/add as a placeholders
 495   __ patchable_set(addrlit, reg);
 496 
 497   patching_epilog(patch, lir_patch_normal, reg, info);
 498 }
 499 
 500 
 501 void LIR_Assembler::emit_op3(LIR_Op3* op) {
 502   Register Rdividend = op->in_opr1()->as_register();
 503   Register Rdivisor  = noreg;
 504   Register Rscratch  = op->in_opr3()->as_register();
 505   Register Rresult   = op->result_opr()->as_register();
 506   int divisor = -1;
 507 
 508   if (op->in_opr2()->is_register()) {
 509     Rdivisor = op->in_opr2()->as_register();
 510   } else {
 511     divisor = op->in_opr2()->as_constant_ptr()->as_jint();
 512     assert(Assembler::is_simm13(divisor), "can only handle simm13");
 513   }
 514 
 515   assert(Rdividend != Rscratch, "");
 516   assert(Rdivisor  != Rscratch, "");
 517   assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");
 518 
 519   if (Rdivisor == noreg && is_power_of_2(divisor)) {
 520     // convert division by a power of two into some shifts and logical operations
 521     if (op->code() == lir_idiv) {
 522       if (divisor == 2) {
 523         __ srl(Rdividend, 31, Rscratch);
 524       } else {
 525         __ sra(Rdividend, 31, Rscratch);
 526         __ and3(Rscratch, divisor - 1, Rscratch);
 527       }
 528       __ add(Rdividend, Rscratch, Rscratch);
 529       __ sra(Rscratch, log2_intptr(divisor), Rresult);
 530       return;
 531     } else {
 532       if (divisor == 2) {
 533         __ srl(Rdividend, 31, Rscratch);
 534       } else {
 535         __ sra(Rdividend, 31, Rscratch);
 536         __ and3(Rscratch, divisor - 1,Rscratch);
 537       }
 538       __ add(Rdividend, Rscratch, Rscratch);
 539       __ andn(Rscratch, divisor - 1,Rscratch);
 540       __ sub(Rdividend, Rscratch, Rresult);
 541       return;
 542     }
 543   }
 544 
 545   __ sra(Rdividend, 31, Rscratch);
 546   __ wry(Rscratch);
 547   if (!VM_Version::v9_instructions_work()) {
 548     // v9 doesn't require these nops
 549     __ nop();
 550     __ nop();
 551     __ nop();
 552     __ nop();
 553   }
 554 
 555   add_debug_info_for_div0_here(op->info());
 556 
 557   if (Rdivisor != noreg) {
 558     __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
 559   } else {
 560     assert(Assembler::is_simm13(divisor), "can only handle simm13");
 561     __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
 562   }
 563 
 564   Label skip;
 565   __ br(Assembler::overflowSet, true, Assembler::pn, skip);
 566   __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
 567   __ bind(skip);
 568 
 569   if (op->code() == lir_irem) {
 570     if (Rdivisor != noreg) {
 571       __ smul(Rscratch, Rdivisor, Rscratch);
 572     } else {
 573       __ smul(Rscratch, divisor, Rscratch);
 574     }
 575     __ sub(Rdividend, Rscratch, Rresult);
 576   }
 577 }
 578 
 579 
 580 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
 581 #ifdef ASSERT
 582   assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
 583   if (op->block() != NULL)  _branch_target_blocks.append(op->block());
 584   if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
 585 #endif
 586   assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
 587 
 588   if (op->cond() == lir_cond_always) {
 589     __ br(Assembler::always, false, Assembler::pt, *(op->label()));
 590   } else if (op->code() == lir_cond_float_branch) {
 591     assert(op->ublock() != NULL, "must have unordered successor");
 592     bool is_unordered = (op->ublock() == op->block());
 593     Assembler::Condition acond;
 594     switch (op->cond()) {
 595       case lir_cond_equal:         acond = Assembler::f_equal;    break;
 596       case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
 597       case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
 598       case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
 599       case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
 600       case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
 601       default :                         ShouldNotReachHere();
 602     };
 603 
 604     if (!VM_Version::v9_instructions_work()) {
 605       __ nop();
 606     }
 607     __ fb( acond, false, Assembler::pn, *(op->label()));
 608   } else {
 609     assert (op->code() == lir_branch, "just checking");
 610 
 611     Assembler::Condition acond;
 612     switch (op->cond()) {
 613       case lir_cond_equal:        acond = Assembler::equal;                break;
 614       case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
 615       case lir_cond_less:         acond = Assembler::less;                 break;
 616       case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
 617       case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
 618       case lir_cond_greater:      acond = Assembler::greater;              break;
 619       case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
 620       case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
 621       default:                         ShouldNotReachHere();
 622     };
 623 
 624     // sparc has different condition codes for testing 32-bit
 625     // vs. 64-bit values.  We could always test xcc is we could
 626     // guarantee that 32-bit loads always sign extended but that isn't
 627     // true and since sign extension isn't free, it would impose a
 628     // slight cost.
 629 #ifdef _LP64
 630     if  (op->type() == T_INT) {
 631       __ br(acond, false, Assembler::pn, *(op->label()));
 632     } else
 633 #endif
 634       __ brx(acond, false, Assembler::pn, *(op->label()));
 635   }
 636   // The peephole pass fills the delay slot
 637 }
 638 
 639 
 640 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
 641   Bytecodes::Code code = op->bytecode();
 642   LIR_Opr dst = op->result_opr();
 643 
 644   switch(code) {
 645     case Bytecodes::_i2l: {
 646       Register rlo  = dst->as_register_lo();
 647       Register rhi  = dst->as_register_hi();
 648       Register rval = op->in_opr()->as_register();
 649 #ifdef _LP64
 650       __ sra(rval, 0, rlo);
 651 #else
 652       __ mov(rval, rlo);
 653       __ sra(rval, BitsPerInt-1, rhi);
 654 #endif
 655       break;
 656     }
 657     case Bytecodes::_i2d:
 658     case Bytecodes::_i2f: {
 659       bool is_double = (code == Bytecodes::_i2d);
 660       FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
 661       FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
 662       FloatRegister rsrc = op->in_opr()->as_float_reg();
 663       if (rsrc != rdst) {
 664         __ fmov(FloatRegisterImpl::S, rsrc, rdst);
 665       }
 666       __ fitof(w, rdst, rdst);
 667       break;
 668     }
 669     case Bytecodes::_f2i:{
 670       FloatRegister rsrc = op->in_opr()->as_float_reg();
 671       Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
 672       Label L;
 673       // result must be 0 if value is NaN; test by comparing value to itself
 674       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
 675       if (!VM_Version::v9_instructions_work()) {
 676         __ nop();
 677       }
 678       __ fb(Assembler::f_unordered, true, Assembler::pn, L);
 679       __ delayed()->st(G0, addr); // annuled if contents of rsrc is not NaN
 680       __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
 681       // move integer result from float register to int register
 682       __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
 683       __ bind (L);
 684       break;
 685     }
 686     case Bytecodes::_l2i: {
 687       Register rlo  = op->in_opr()->as_register_lo();
 688       Register rhi  = op->in_opr()->as_register_hi();
 689       Register rdst = dst->as_register();
 690 #ifdef _LP64
 691       __ sra(rlo, 0, rdst);
 692 #else
 693       __ mov(rlo, rdst);
 694 #endif
 695       break;
 696     }
 697     case Bytecodes::_d2f:
 698     case Bytecodes::_f2d: {
 699       bool is_double = (code == Bytecodes::_f2d);
 700       assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
 701       LIR_Opr val = op->in_opr();
 702       FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
 703       FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
 704       FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
 705       FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
 706       __ ftof(vw, dw, rval, rdst);
 707       break;
 708     }
 709     case Bytecodes::_i2s:
 710     case Bytecodes::_i2b: {
 711       Register rval = op->in_opr()->as_register();
 712       Register rdst = dst->as_register();
 713       int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
 714       __ sll (rval, shift, rdst);
 715       __ sra (rdst, shift, rdst);
 716       break;
 717     }
 718     case Bytecodes::_i2c: {
 719       Register rval = op->in_opr()->as_register();
 720       Register rdst = dst->as_register();
 721       int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
 722       __ sll (rval, shift, rdst);
 723       __ srl (rdst, shift, rdst);
 724       break;
 725     }
 726 
 727     default: ShouldNotReachHere();
 728   }
 729 }
 730 
 731 
 732 void LIR_Assembler::align_call(LIR_Code) {
 733   // do nothing since all instructions are word aligned on sparc
 734 }
 735 
 736 
 737 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
 738   __ call(op->addr(), rtype);
 739   // The peephole pass fills the delay slot, add_call_info is done in
 740   // LIR_Assembler::emit_delay.
 741 }
 742 
 743 
 744 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
 745   RelocationHolder rspec = virtual_call_Relocation::spec(pc());
 746   __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
 747   __ relocate(rspec);
 748   __ call(op->addr(), relocInfo::none);
 749   // The peephole pass fills the delay slot, add_call_info is done in
 750   // LIR_Assembler::emit_delay.
 751 }
 752 
 753 
 754 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
 755   add_debug_info_for_null_check_here(op->info());
 756   __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
 757   if (__ is_simm13(op->vtable_offset())) {
 758     __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
 759   } else {
 760     // This will generate 2 instructions
 761     __ set(op->vtable_offset(), G5_method);
 762     // ld_ptr, set_hi, set
 763     __ ld_ptr(G3_scratch, G5_method, G5_method);
 764   }
 765   __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
 766   __ callr(G3_scratch, G0);
 767   // the peephole pass fills the delay slot
 768 }
 769 
 770 
 771 // load with 32-bit displacement
 772 int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
 773   int load_offset = code_offset();
 774   if (Assembler::is_simm13(disp)) {
 775     if (info != NULL) add_debug_info_for_null_check_here(info);
 776     switch(ld_type) {
 777       case T_BOOLEAN: // fall through
 778       case T_BYTE  : __ ldsb(s, disp, d); break;
 779       case T_CHAR  : __ lduh(s, disp, d); break;
 780       case T_SHORT : __ ldsh(s, disp, d); break;
 781       case T_INT   : __ ld(s, disp, d); break;
 782       case T_ADDRESS:// fall through
 783       case T_ARRAY : // fall through
 784       case T_OBJECT: __ ld_ptr(s, disp, d); break;
 785       default      : ShouldNotReachHere();
 786     }
 787   } else {
 788     __ set(disp, O7);
 789     if (info != NULL) add_debug_info_for_null_check_here(info);
 790     load_offset = code_offset();
 791     switch(ld_type) {
 792       case T_BOOLEAN: // fall through
 793       case T_BYTE  : __ ldsb(s, O7, d); break;
 794       case T_CHAR  : __ lduh(s, O7, d); break;
 795       case T_SHORT : __ ldsh(s, O7, d); break;
 796       case T_INT   : __ ld(s, O7, d); break;
 797       case T_ADDRESS:// fall through
 798       case T_ARRAY : // fall through
 799       case T_OBJECT: __ ld_ptr(s, O7, d); break;
 800       default      : ShouldNotReachHere();
 801     }
 802   }
 803   if (ld_type == T_ARRAY || ld_type == T_OBJECT) __ verify_oop(d);
 804   return load_offset;
 805 }
 806 
 807 
 808 // store with 32-bit displacement
 809 void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
 810   if (Assembler::is_simm13(offset)) {
 811     if (info != NULL)  add_debug_info_for_null_check_here(info);
 812     switch (type) {
 813       case T_BOOLEAN: // fall through
 814       case T_BYTE  : __ stb(value, base, offset); break;
 815       case T_CHAR  : __ sth(value, base, offset); break;
 816       case T_SHORT : __ sth(value, base, offset); break;
 817       case T_INT   : __ stw(value, base, offset); break;
 818       case T_ADDRESS:// fall through
 819       case T_ARRAY : // fall through
 820       case T_OBJECT: __ st_ptr(value, base, offset); break;
 821       default      : ShouldNotReachHere();
 822     }
 823   } else {
 824     __ set(offset, O7);
 825     if (info != NULL) add_debug_info_for_null_check_here(info);
 826     switch (type) {
 827       case T_BOOLEAN: // fall through
 828       case T_BYTE  : __ stb(value, base, O7); break;
 829       case T_CHAR  : __ sth(value, base, O7); break;
 830       case T_SHORT : __ sth(value, base, O7); break;
 831       case T_INT   : __ stw(value, base, O7); break;
 832       case T_ADDRESS:// fall through
 833       case T_ARRAY : //fall through
 834       case T_OBJECT: __ st_ptr(value, base, O7); break;
 835       default      : ShouldNotReachHere();
 836     }
 837   }
 838   // Note: Do the store before verification as the code might be patched!
 839   if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(value);
 840 }
 841 
 842 
 843 // load float with 32-bit displacement
 844 void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
 845   FloatRegisterImpl::Width w;
 846   switch(ld_type) {
 847     case T_FLOAT : w = FloatRegisterImpl::S; break;
 848     case T_DOUBLE: w = FloatRegisterImpl::D; break;
 849     default      : ShouldNotReachHere();
 850   }
 851 
 852   if (Assembler::is_simm13(disp)) {
 853     if (info != NULL) add_debug_info_for_null_check_here(info);
 854     if (disp % BytesPerLong != 0 && w == FloatRegisterImpl::D) {
 855       __ ldf(FloatRegisterImpl::S, s, disp + BytesPerWord, d->successor());
 856       __ ldf(FloatRegisterImpl::S, s, disp               , d);
 857     } else {
 858       __ ldf(w, s, disp, d);
 859     }
 860   } else {
 861     __ set(disp, O7);
 862     if (info != NULL) add_debug_info_for_null_check_here(info);
 863     __ ldf(w, s, O7, d);
 864   }
 865 }
 866 
 867 
 868 // store float with 32-bit displacement
 869 void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
 870   FloatRegisterImpl::Width w;
 871   switch(type) {
 872     case T_FLOAT : w = FloatRegisterImpl::S; break;
 873     case T_DOUBLE: w = FloatRegisterImpl::D; break;
 874     default      : ShouldNotReachHere();
 875   }
 876 
 877   if (Assembler::is_simm13(offset)) {
 878     if (info != NULL) add_debug_info_for_null_check_here(info);
 879     if (w == FloatRegisterImpl::D && offset % BytesPerLong != 0) {
 880       __ stf(FloatRegisterImpl::S, value->successor(), base, offset + BytesPerWord);
 881       __ stf(FloatRegisterImpl::S, value             , base, offset);
 882     } else {
 883       __ stf(w, value, base, offset);
 884     }
 885   } else {
 886     __ set(offset, O7);
 887     if (info != NULL) add_debug_info_for_null_check_here(info);
 888     __ stf(w, value, O7, base);
 889   }
 890 }
 891 
 892 
 893 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) {
 894   int store_offset;
 895   if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
 896     assert(!unaligned, "can't handle this");
 897     // for offsets larger than a simm13 we setup the offset in O7
 898     __ set(offset, O7);
 899     store_offset = store(from_reg, base, O7, type);
 900   } else {
 901     if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
 902     store_offset = code_offset();
 903     switch (type) {
 904       case T_BOOLEAN: // fall through
 905       case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
 906       case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
 907       case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
 908       case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
 909       case T_LONG  :
 910 #ifdef _LP64
 911         if (unaligned || PatchALot) {
 912           __ srax(from_reg->as_register_lo(), 32, O7);
 913           __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
 914           __ stw(O7,                         base, offset + hi_word_offset_in_bytes);
 915         } else {
 916           __ stx(from_reg->as_register_lo(), base, offset);
 917         }
 918 #else
 919         assert(Assembler::is_simm13(offset + 4), "must be");
 920         __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
 921         __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
 922 #endif
 923         break;
 924       case T_ADDRESS:// fall through
 925       case T_ARRAY : // fall through
 926       case T_OBJECT: __ st_ptr(from_reg->as_register(), base, offset); break;
 927       case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
 928       case T_DOUBLE:
 929         {
 930           FloatRegister reg = from_reg->as_double_reg();
 931           // split unaligned stores
 932           if (unaligned || PatchALot) {
 933             assert(Assembler::is_simm13(offset + 4), "must be");
 934             __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
 935             __ stf(FloatRegisterImpl::S, reg,              base, offset);
 936           } else {
 937             __ stf(FloatRegisterImpl::D, reg, base, offset);
 938           }
 939           break;
 940         }
 941       default      : ShouldNotReachHere();
 942     }
 943   }
 944   return store_offset;
 945 }
 946 
 947 
 948 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type) {
 949   if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
 950   int store_offset = code_offset();
 951   switch (type) {
 952     case T_BOOLEAN: // fall through
 953     case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
 954     case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
 955     case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
 956     case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
 957     case T_LONG  :
 958 #ifdef _LP64
 959       __ stx(from_reg->as_register_lo(), base, disp);
 960 #else
 961       assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
 962       __ std(from_reg->as_register_hi(), base, disp);
 963 #endif
 964       break;
 965     case T_ADDRESS:// fall through
 966     case T_ARRAY : // fall through
 967     case T_OBJECT: __ st_ptr(from_reg->as_register(), base, disp); break;
 968     case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
 969     case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
 970     default      : ShouldNotReachHere();
 971   }
 972   return store_offset;
 973 }
 974 
 975 
 976 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned) {
 977   int load_offset;
 978   if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
 979     assert(base != O7, "destroying register");
 980     assert(!unaligned, "can't handle this");
 981     // for offsets larger than a simm13 we setup the offset in O7
 982     __ set(offset, O7);
 983     load_offset = load(base, O7, to_reg, type);
 984   } else {
 985     load_offset = code_offset();
 986     switch(type) {
 987       case T_BOOLEAN: // fall through
 988       case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
 989       case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
 990       case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
 991       case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
 992       case T_LONG  :
 993         if (!unaligned) {
 994 #ifdef _LP64
 995           __ ldx(base, offset, to_reg->as_register_lo());
 996 #else
 997           assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
 998                  "must be sequential");
 999           __ ldd(base, offset, to_reg->as_register_hi());
1000 #endif
1001         } else {
1002 #ifdef _LP64
1003           assert(base != to_reg->as_register_lo(), "can't handle this");
1004           assert(O7 != to_reg->as_register_lo(), "can't handle this");
1005           __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
1006           __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
1007           __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
1008           __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
1009 #else
1010           if (base == to_reg->as_register_lo()) {
1011             __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
1012             __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
1013           } else {
1014             __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
1015             __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
1016           }
1017 #endif
1018         }
1019         break;
1020       case T_ADDRESS:// fall through
1021       case T_ARRAY : // fall through
1022       case T_OBJECT: __ ld_ptr(base, offset, to_reg->as_register()); break;
1023       case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
1024       case T_DOUBLE:
1025         {
1026           FloatRegister reg = to_reg->as_double_reg();
1027           // split unaligned loads
1028           if (unaligned || PatchALot) {
1029             __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
1030             __ ldf(FloatRegisterImpl::S, base, offset,     reg);
1031           } else {
1032             __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
1033           }
1034           break;
1035         }
1036       default      : ShouldNotReachHere();
1037     }
1038     if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
1039   }
1040   return load_offset;
1041 }
1042 
1043 
1044 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type) {
1045   int load_offset = code_offset();
1046   switch(type) {
1047     case T_BOOLEAN: // fall through
1048     case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
1049     case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
1050     case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
1051     case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
1052     case T_ADDRESS:// fall through
1053     case T_ARRAY : // fall through
1054     case T_OBJECT: __ ld_ptr(base, disp, to_reg->as_register()); break;
1055     case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
1056     case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
1057     case T_LONG  :
1058 #ifdef _LP64
1059       __ ldx(base, disp, to_reg->as_register_lo());
1060 #else
1061       assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
1062              "must be sequential");
1063       __ ldd(base, disp, to_reg->as_register_hi());
1064 #endif
1065       break;
1066     default      : ShouldNotReachHere();
1067   }
1068   if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
1069   return load_offset;
1070 }
1071 
1072 
1073 // load/store with an Address
1074 void LIR_Assembler::load(const Address& a, Register d,  BasicType ld_type, CodeEmitInfo *info, int offset) {
1075   load(a.base(), a.disp() + offset, d, ld_type, info);
1076 }
1077 
1078 
1079 void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
1080   store(value, dest.base(), dest.disp() + offset, type, info);
1081 }
1082 
1083 
1084 // loadf/storef with an Address
1085 void LIR_Assembler::load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) {
1086   load(a.base(), a.disp() + offset, d, ld_type, info);
1087 }
1088 
1089 
1090 void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
1091   store(value, dest.base(), dest.disp() + offset, type, info);
1092 }
1093 
1094 
1095 // load/store with an Address
1096 void LIR_Assembler::load(LIR_Address* a, Register d,  BasicType ld_type, CodeEmitInfo *info) {
1097   load(as_Address(a), d, ld_type, info);
1098 }
1099 
1100 
1101 void LIR_Assembler::store(Register value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
1102   store(value, as_Address(dest), type, info);
1103 }
1104 
1105 
1106 // loadf/storef with an Address
1107 void LIR_Assembler::load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
1108   load(as_Address(a), d, ld_type, info);
1109 }
1110 
1111 
1112 void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
1113   store(value, as_Address(dest), type, info);
1114 }
1115 
1116 
1117 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
1118   LIR_Const* c = src->as_constant_ptr();
1119   switch (c->type()) {
1120     case T_INT:
1121     case T_FLOAT:
1122     case T_ADDRESS: {
1123       Register src_reg = O7;
1124       int value = c->as_jint_bits();
1125       if (value == 0) {
1126         src_reg = G0;
1127       } else {
1128         __ set(value, O7);
1129       }
1130       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1131       __ stw(src_reg, addr.base(), addr.disp());
1132       break;
1133     }
1134     case T_OBJECT: {
1135       Register src_reg = O7;
1136       jobject2reg(c->as_jobject(), src_reg);
1137       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1138       __ st_ptr(src_reg, addr.base(), addr.disp());
1139       break;
1140     }
1141     case T_LONG:
1142     case T_DOUBLE: {
1143       Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
1144 
1145       Register tmp = O7;
1146       int value_lo = c->as_jint_lo_bits();
1147       if (value_lo == 0) {
1148         tmp = G0;
1149       } else {
1150         __ set(value_lo, O7);
1151       }
1152       __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
1153       int value_hi = c->as_jint_hi_bits();
1154       if (value_hi == 0) {
1155         tmp = G0;
1156       } else {
1157         __ set(value_hi, O7);
1158       }
1159       __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
1160       break;
1161     }
1162     default:
1163       Unimplemented();
1164   }
1165 }
1166 
1167 
1168 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
1169   LIR_Const* c = src->as_constant_ptr();
1170   LIR_Address* addr     = dest->as_address_ptr();
1171   Register base = addr->base()->as_pointer_register();
1172 
1173   if (info != NULL) {
1174     add_debug_info_for_null_check_here(info);
1175   }
1176   switch (c->type()) {
1177     case T_INT:
1178     case T_FLOAT:
1179     case T_ADDRESS: {
1180       LIR_Opr tmp = FrameMap::O7_opr;
1181       int value = c->as_jint_bits();
1182       if (value == 0) {
1183         tmp = FrameMap::G0_opr;
1184       } else if (Assembler::is_simm13(value)) {
1185         __ set(value, O7);
1186       }
1187       if (addr->index()->is_valid()) {
1188         assert(addr->disp() == 0, "must be zero");
1189         store(tmp, base, addr->index()->as_pointer_register(), type);
1190       } else {
1191         assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1192         store(tmp, base, addr->disp(), type);
1193       }
1194       break;
1195     }
1196     case T_LONG:
1197     case T_DOUBLE: {
1198       assert(!addr->index()->is_valid(), "can't handle reg reg address here");
1199       assert(Assembler::is_simm13(addr->disp()) &&
1200              Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
1201 
1202       Register tmp = O7;
1203       int value_lo = c->as_jint_lo_bits();
1204       if (value_lo == 0) {
1205         tmp = G0;
1206       } else {
1207         __ set(value_lo, O7);
1208       }
1209       store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT);
1210       int value_hi = c->as_jint_hi_bits();
1211       if (value_hi == 0) {
1212         tmp = G0;
1213       } else {
1214         __ set(value_hi, O7);
1215       }
1216       store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT);
1217       break;
1218     }
1219     case T_OBJECT: {
1220       jobject obj = c->as_jobject();
1221       LIR_Opr tmp;
1222       if (obj == NULL) {
1223         tmp = FrameMap::G0_opr;
1224       } else {
1225         tmp = FrameMap::O7_opr;
1226         jobject2reg(c->as_jobject(), O7);
1227       }
1228       // handle either reg+reg or reg+disp address
1229       if (addr->index()->is_valid()) {
1230         assert(addr->disp() == 0, "must be zero");
1231         store(tmp, base, addr->index()->as_pointer_register(), type);
1232       } else {
1233         assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1234         store(tmp, base, addr->disp(), type);
1235       }
1236 
1237       break;
1238     }
1239     default:
1240       Unimplemented();
1241   }
1242 }
1243 
1244 
1245 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
1246   LIR_Const* c = src->as_constant_ptr();
1247   LIR_Opr to_reg = dest;
1248 
1249   switch (c->type()) {
1250     case T_INT:
1251     case T_ADDRESS:
1252       {
1253         jint con = c->as_jint();
1254         if (to_reg->is_single_cpu()) {
1255           assert(patch_code == lir_patch_none, "no patching handled here");
1256           __ set(con, to_reg->as_register());
1257         } else {
1258           ShouldNotReachHere();
1259           assert(to_reg->is_single_fpu(), "wrong register kind");
1260 
1261           __ set(con, O7);
1262           Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
1263           __ st(O7, temp_slot);
1264           __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
1265         }
1266       }
1267       break;
1268 
1269     case T_LONG:
1270       {
1271         jlong con = c->as_jlong();
1272 
1273         if (to_reg->is_double_cpu()) {
1274 #ifdef _LP64
1275           __ set(con,  to_reg->as_register_lo());
1276 #else
1277           __ set(low(con),  to_reg->as_register_lo());
1278           __ set(high(con), to_reg->as_register_hi());
1279 #endif
1280 #ifdef _LP64
1281         } else if (to_reg->is_single_cpu()) {
1282           __ set(con, to_reg->as_register());
1283 #endif
1284         } else {
1285           ShouldNotReachHere();
1286           assert(to_reg->is_double_fpu(), "wrong register kind");
1287           Address temp_slot_lo(SP, ((frame::register_save_words  ) * wordSize) + STACK_BIAS);
1288           Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
1289           __ set(low(con),  O7);
1290           __ st(O7, temp_slot_lo);
1291           __ set(high(con), O7);
1292           __ st(O7, temp_slot_hi);
1293           __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
1294         }
1295       }
1296       break;
1297 
1298     case T_OBJECT:
1299       {
1300         if (patch_code == lir_patch_none) {
1301           jobject2reg(c->as_jobject(), to_reg->as_register());
1302         } else {
1303           jobject2reg_with_patching(to_reg->as_register(), info);
1304         }
1305       }
1306       break;
1307 
1308     case T_FLOAT:
1309       {
1310         address const_addr = __ float_constant(c->as_jfloat());
1311         if (const_addr == NULL) {
1312           bailout("const section overflow");
1313           break;
1314         }
1315         RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1316         AddressLiteral const_addrlit(const_addr, rspec);
1317         if (to_reg->is_single_fpu()) {
1318           __ patchable_sethi(const_addrlit, O7);
1319           __ relocate(rspec);
1320           __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
1321 
1322         } else {
1323           assert(to_reg->is_single_cpu(), "Must be a cpu register.");
1324 
1325           __ set(const_addrlit, O7);
1326           load(O7, 0, to_reg->as_register(), T_INT);
1327         }
1328       }
1329       break;
1330 
1331     case T_DOUBLE:
1332       {
1333         address const_addr = __ double_constant(c->as_jdouble());
1334         if (const_addr == NULL) {
1335           bailout("const section overflow");
1336           break;
1337         }
1338         RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1339 
1340         if (to_reg->is_double_fpu()) {
1341           AddressLiteral const_addrlit(const_addr, rspec);
1342           __ patchable_sethi(const_addrlit, O7);
1343           __ relocate(rspec);
1344           __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
1345         } else {
1346           assert(to_reg->is_double_cpu(), "Must be a long register.");
1347 #ifdef _LP64
1348           __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
1349 #else
1350           __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
1351           __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
1352 #endif
1353         }
1354 
1355       }
1356       break;
1357 
1358     default:
1359       ShouldNotReachHere();
1360   }
1361 }
1362 
1363 Address LIR_Assembler::as_Address(LIR_Address* addr) {
1364   Register reg = addr->base()->as_register();
1365   return Address(reg, addr->disp());
1366 }
1367 
1368 
1369 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1370   switch (type) {
1371     case T_INT:
1372     case T_FLOAT: {
1373       Register tmp = O7;
1374       Address from = frame_map()->address_for_slot(src->single_stack_ix());
1375       Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
1376       __ lduw(from.base(), from.disp(), tmp);
1377       __ stw(tmp, to.base(), to.disp());
1378       break;
1379     }
1380     case T_OBJECT: {
1381       Register tmp = O7;
1382       Address from = frame_map()->address_for_slot(src->single_stack_ix());
1383       Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
1384       __ ld_ptr(from.base(), from.disp(), tmp);
1385       __ st_ptr(tmp, to.base(), to.disp());
1386       break;
1387     }
1388     case T_LONG:
1389     case T_DOUBLE: {
1390       Register tmp = O7;
1391       Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
1392       Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
1393       __ lduw(from.base(), from.disp(), tmp);
1394       __ stw(tmp, to.base(), to.disp());
1395       __ lduw(from.base(), from.disp() + 4, tmp);
1396       __ stw(tmp, to.base(), to.disp() + 4);
1397       break;
1398     }
1399 
1400     default:
1401       ShouldNotReachHere();
1402   }
1403 }
1404 
1405 
1406 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
1407   Address base = as_Address(addr);
1408   return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
1409 }
1410 
1411 
1412 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
1413   Address base = as_Address(addr);
1414   return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
1415 }
1416 
1417 
1418 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
1419                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) {
1420 
1421   LIR_Address* addr = src_opr->as_address_ptr();
1422   LIR_Opr to_reg = dest;
1423 
1424   Register src = addr->base()->as_pointer_register();
1425   Register disp_reg = noreg;
1426   int disp_value = addr->disp();
1427   bool needs_patching = (patch_code != lir_patch_none);
1428 
1429   if (addr->base()->type() == T_OBJECT) {
1430     __ verify_oop(src);
1431   }
1432 
1433   PatchingStub* patch = NULL;
1434   if (needs_patching) {
1435     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1436     assert(!to_reg->is_double_cpu() ||
1437            patch_code == lir_patch_none ||
1438            patch_code == lir_patch_normal, "patching doesn't match register");
1439   }
1440 
1441   if (addr->index()->is_illegal()) {
1442     if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1443       if (needs_patching) {
1444         __ patchable_set(0, O7);
1445       } else {
1446         __ set(disp_value, O7);
1447       }
1448       disp_reg = O7;
1449     }
1450   } else if (unaligned || PatchALot) {
1451     __ add(src, addr->index()->as_register(), O7);
1452     src = O7;
1453   } else {
1454     disp_reg = addr->index()->as_pointer_register();
1455     assert(disp_value == 0, "can't handle 3 operand addresses");
1456   }
1457 
1458   // remember the offset of the load.  The patching_epilog must be done
1459   // before the call to add_debug_info, otherwise the PcDescs don't get
1460   // entered in increasing order.
1461   int offset = code_offset();
1462 
1463   assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1464   if (disp_reg == noreg) {
1465     offset = load(src, disp_value, to_reg, type, unaligned);
1466   } else {
1467     assert(!unaligned, "can't handle this");
1468     offset = load(src, disp_reg, to_reg, type);
1469   }
1470 
1471   if (patch != NULL) {
1472     patching_epilog(patch, patch_code, src, info);
1473   }
1474 
1475   if (info != NULL) add_debug_info_for_null_check(offset, info);
1476 }
1477 
1478 
1479 void LIR_Assembler::prefetchr(LIR_Opr src) {
1480   LIR_Address* addr = src->as_address_ptr();
1481   Address from_addr = as_Address(addr);
1482 
1483   if (VM_Version::has_v9()) {
1484     __ prefetch(from_addr, Assembler::severalReads);
1485   }
1486 }
1487 
1488 
1489 void LIR_Assembler::prefetchw(LIR_Opr src) {
1490   LIR_Address* addr = src->as_address_ptr();
1491   Address from_addr = as_Address(addr);
1492 
1493   if (VM_Version::has_v9()) {
1494     __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
1495   }
1496 }
1497 
1498 
1499 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1500   Address addr;
1501   if (src->is_single_word()) {
1502     addr = frame_map()->address_for_slot(src->single_stack_ix());
1503   } else if (src->is_double_word())  {
1504     addr = frame_map()->address_for_double_slot(src->double_stack_ix());
1505   }
1506 
1507   bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
1508   load(addr.base(), addr.disp(), dest, dest->type(), unaligned);
1509 }
1510 
1511 
1512 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
1513   Address addr;
1514   if (dest->is_single_word()) {
1515     addr = frame_map()->address_for_slot(dest->single_stack_ix());
1516   } else if (dest->is_double_word())  {
1517     addr = frame_map()->address_for_slot(dest->double_stack_ix());
1518   }
1519   bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
1520   store(from_reg, addr.base(), addr.disp(), from_reg->type(), unaligned);
1521 }
1522 
1523 
1524 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
1525   if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
1526     if (from_reg->is_double_fpu()) {
1527       // double to double moves
1528       assert(to_reg->is_double_fpu(), "should match");
1529       __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
1530     } else {
1531       // float to float moves
1532       assert(to_reg->is_single_fpu(), "should match");
1533       __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
1534     }
1535   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1536     if (from_reg->is_double_cpu()) {
1537 #ifdef _LP64
1538       __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
1539 #else
1540       assert(to_reg->is_double_cpu() &&
1541              from_reg->as_register_hi() != to_reg->as_register_lo() &&
1542              from_reg->as_register_lo() != to_reg->as_register_hi(),
1543              "should both be long and not overlap");
1544       // long to long moves
1545       __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
1546       __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
1547 #endif
1548 #ifdef _LP64
1549     } else if (to_reg->is_double_cpu()) {
1550       // int to int moves
1551       __ mov(from_reg->as_register(), to_reg->as_register_lo());
1552 #endif
1553     } else {
1554       // int to int moves
1555       __ mov(from_reg->as_register(), to_reg->as_register());
1556     }
1557   } else {
1558     ShouldNotReachHere();
1559   }
1560   if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
1561     __ verify_oop(to_reg->as_register());
1562   }
1563 }
1564 
1565 
1566 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1567                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1568                             bool unaligned) {
1569   LIR_Address* addr = dest->as_address_ptr();
1570 
1571   Register src = addr->base()->as_pointer_register();
1572   Register disp_reg = noreg;
1573   int disp_value = addr->disp();
1574   bool needs_patching = (patch_code != lir_patch_none);
1575 
1576   if (addr->base()->is_oop_register()) {
1577     __ verify_oop(src);
1578   }
1579 
1580   PatchingStub* patch = NULL;
1581   if (needs_patching) {
1582     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1583     assert(!from_reg->is_double_cpu() ||
1584            patch_code == lir_patch_none ||
1585            patch_code == lir_patch_normal, "patching doesn't match register");
1586   }
1587 
1588   if (addr->index()->is_illegal()) {
1589     if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1590       if (needs_patching) {
1591         __ patchable_set(0, O7);
1592       } else {
1593         __ set(disp_value, O7);
1594       }
1595       disp_reg = O7;
1596     }
1597   } else if (unaligned || PatchALot) {
1598     __ add(src, addr->index()->as_register(), O7);
1599     src = O7;
1600   } else {
1601     disp_reg = addr->index()->as_pointer_register();
1602     assert(disp_value == 0, "can't handle 3 operand addresses");
1603   }
1604 
1605   // remember the offset of the store.  The patching_epilog must be done
1606   // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
1607   // entered in increasing order.
1608   int offset;
1609 
1610   assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1611   if (disp_reg == noreg) {
1612     offset = store(from_reg, src, disp_value, type, unaligned);
1613   } else {
1614     assert(!unaligned, "can't handle this");
1615     offset = store(from_reg, src, disp_reg, type);
1616   }
1617 
1618   if (patch != NULL) {
1619     patching_epilog(patch, patch_code, src, info);
1620   }
1621 
1622   if (info != NULL) add_debug_info_for_null_check(offset, info);
1623 }
1624 
1625 
1626 void LIR_Assembler::return_op(LIR_Opr result) {
1627   // the poll may need a register so just pick one that isn't the return register
1628 #ifdef TIERED
1629   if (result->type_field() == LIR_OprDesc::long_type) {
1630     // Must move the result to G1
1631     // Must leave proper result in O0,O1 and G1 (TIERED only)
1632     __ sllx(I0, 32, G1);          // Shift bits into high G1
1633     __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
1634     __ or3 (I1, G1, G1);          // OR 64 bits into G1
1635   }
1636 #endif // TIERED
1637   __ set((intptr_t)os::get_polling_page(), L0);
1638   __ relocate(relocInfo::poll_return_type);
1639   __ ld_ptr(L0, 0, G0);
1640   __ ret();
1641   __ delayed()->restore();
1642 }
1643 
1644 
1645 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1646   __ set((intptr_t)os::get_polling_page(), tmp->as_register());
1647   if (info != NULL) {
1648     add_debug_info_for_branch(info);
1649   } else {
1650     __ relocate(relocInfo::poll_type);
1651   }
1652 
1653   int offset = __ offset();
1654   __ ld_ptr(tmp->as_register(), 0, G0);
1655 
1656   return offset;
1657 }
1658 
1659 
1660 void LIR_Assembler::emit_static_call_stub() {
1661   address call_pc = __ pc();
1662   address stub = __ start_a_stub(call_stub_size);
1663   if (stub == NULL) {
1664     bailout("static call stub overflow");
1665     return;
1666   }
1667 
1668   int start = __ offset();
1669   __ relocate(static_stub_Relocation::spec(call_pc));
1670 
1671   __ set_oop(NULL, G5);
1672   // must be set to -1 at code generation time
1673   AddressLiteral addrlit(-1);
1674   __ jump_to(addrlit, G3);
1675   __ delayed()->nop();
1676 
1677   assert(__ offset() - start <= call_stub_size, "stub too big");
1678   __ end_a_stub();
1679 }
1680 
1681 
1682 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1683   if (opr1->is_single_fpu()) {
1684     __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
1685   } else if (opr1->is_double_fpu()) {
1686     __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
1687   } else if (opr1->is_single_cpu()) {
1688     if (opr2->is_constant()) {
1689       switch (opr2->as_constant_ptr()->type()) {
1690         case T_INT:
1691           { jint con = opr2->as_constant_ptr()->as_jint();
1692             if (Assembler::is_simm13(con)) {
1693               __ cmp(opr1->as_register(), con);
1694             } else {
1695               __ set(con, O7);
1696               __ cmp(opr1->as_register(), O7);
1697             }
1698           }
1699           break;
1700 
1701         case T_OBJECT:
1702           // there are only equal/notequal comparisions on objects
1703           { jobject con = opr2->as_constant_ptr()->as_jobject();
1704             if (con == NULL) {
1705               __ cmp(opr1->as_register(), 0);
1706             } else {
1707               jobject2reg(con, O7);
1708               __ cmp(opr1->as_register(), O7);
1709             }
1710           }
1711           break;
1712 
1713         default:
1714           ShouldNotReachHere();
1715           break;
1716       }
1717     } else {
1718       if (opr2->is_address()) {
1719         LIR_Address * addr = opr2->as_address_ptr();
1720         BasicType type = addr->type();
1721         if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1722         else                    __ ld(as_Address(addr), O7);
1723         __ cmp(opr1->as_register(), O7);
1724       } else {
1725         __ cmp(opr1->as_register(), opr2->as_register());
1726       }
1727     }
1728   } else if (opr1->is_double_cpu()) {
1729     Register xlo = opr1->as_register_lo();
1730     Register xhi = opr1->as_register_hi();
1731     if (opr2->is_constant() && opr2->as_jlong() == 0) {
1732       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
1733 #ifdef _LP64
1734       __ orcc(xhi, G0, G0);
1735 #else
1736       __ orcc(xhi, xlo, G0);
1737 #endif
1738     } else if (opr2->is_register()) {
1739       Register ylo = opr2->as_register_lo();
1740       Register yhi = opr2->as_register_hi();
1741 #ifdef _LP64
1742       __ cmp(xlo, ylo);
1743 #else
1744       __ subcc(xlo, ylo, xlo);
1745       __ subccc(xhi, yhi, xhi);
1746       if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
1747         __ orcc(xhi, xlo, G0);
1748       }
1749 #endif
1750     } else {
1751       ShouldNotReachHere();
1752     }
1753   } else if (opr1->is_address()) {
1754     LIR_Address * addr = opr1->as_address_ptr();
1755     BasicType type = addr->type();
1756     assert (opr2->is_constant(), "Checking");
1757     if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1758     else                    __ ld(as_Address(addr), O7);
1759     __ cmp(O7, opr2->as_constant_ptr()->as_jint());
1760   } else {
1761     ShouldNotReachHere();
1762   }
1763 }
1764 
1765 
1766 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1767   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1768     bool is_unordered_less = (code == lir_ucmp_fd2i);
1769     if (left->is_single_fpu()) {
1770       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1771     } else if (left->is_double_fpu()) {
1772       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1773     } else {
1774       ShouldNotReachHere();
1775     }
1776   } else if (code == lir_cmp_l2i) {
1777 #ifdef _LP64
1778     __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
1779 #else
1780     __ lcmp(left->as_register_hi(),  left->as_register_lo(),
1781             right->as_register_hi(), right->as_register_lo(),
1782             dst->as_register());
1783 #endif
1784   } else {
1785     ShouldNotReachHere();
1786   }
1787 }
1788 
1789 
1790 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
1791 
1792   Assembler::Condition acond;
1793   switch (condition) {
1794     case lir_cond_equal:        acond = Assembler::equal;        break;
1795     case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
1796     case lir_cond_less:         acond = Assembler::less;         break;
1797     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
1798     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
1799     case lir_cond_greater:      acond = Assembler::greater;      break;
1800     case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned;      break;
1801     case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;      break;
1802     default:                         ShouldNotReachHere();
1803   };
1804 
1805   if (opr1->is_constant() && opr1->type() == T_INT) {
1806     Register dest = result->as_register();
1807     // load up first part of constant before branch
1808     // and do the rest in the delay slot.
1809     if (!Assembler::is_simm13(opr1->as_jint())) {
1810       __ sethi(opr1->as_jint(), dest);
1811     }
1812   } else if (opr1->is_constant()) {
1813     const2reg(opr1, result, lir_patch_none, NULL);
1814   } else if (opr1->is_register()) {
1815     reg2reg(opr1, result);
1816   } else if (opr1->is_stack()) {
1817     stack2reg(opr1, result, result->type());
1818   } else {
1819     ShouldNotReachHere();
1820   }
1821   Label skip;
1822   __ br(acond, false, Assembler::pt, skip);
1823   if (opr1->is_constant() && opr1->type() == T_INT) {
1824     Register dest = result->as_register();
1825     if (Assembler::is_simm13(opr1->as_jint())) {
1826       __ delayed()->or3(G0, opr1->as_jint(), dest);
1827     } else {
1828       // the sethi has been done above, so just put in the low 10 bits
1829       __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
1830     }
1831   } else {
1832     // can't do anything useful in the delay slot
1833     __ delayed()->nop();
1834   }
1835   if (opr2->is_constant()) {
1836     const2reg(opr2, result, lir_patch_none, NULL);
1837   } else if (opr2->is_register()) {
1838     reg2reg(opr2, result);
1839   } else if (opr2->is_stack()) {
1840     stack2reg(opr2, result, result->type());
1841   } else {
1842     ShouldNotReachHere();
1843   }
1844   __ bind(skip);
1845 }
1846 
1847 
1848 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1849   assert(info == NULL, "unused on this code path");
1850   assert(left->is_register(), "wrong items state");
1851   assert(dest->is_register(), "wrong items state");
1852 
1853   if (right->is_register()) {
1854     if (dest->is_float_kind()) {
1855 
1856       FloatRegister lreg, rreg, res;
1857       FloatRegisterImpl::Width w;
1858       if (right->is_single_fpu()) {
1859         w = FloatRegisterImpl::S;
1860         lreg = left->as_float_reg();
1861         rreg = right->as_float_reg();
1862         res  = dest->as_float_reg();
1863       } else {
1864         w = FloatRegisterImpl::D;
1865         lreg = left->as_double_reg();
1866         rreg = right->as_double_reg();
1867         res  = dest->as_double_reg();
1868       }
1869 
1870       switch (code) {
1871         case lir_add: __ fadd(w, lreg, rreg, res); break;
1872         case lir_sub: __ fsub(w, lreg, rreg, res); break;
1873         case lir_mul: // fall through
1874         case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
1875         case lir_div: // fall through
1876         case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
1877         default: ShouldNotReachHere();
1878       }
1879 
1880     } else if (dest->is_double_cpu()) {
1881 #ifdef _LP64
1882       Register dst_lo = dest->as_register_lo();
1883       Register op1_lo = left->as_pointer_register();
1884       Register op2_lo = right->as_pointer_register();
1885 
1886       switch (code) {
1887         case lir_add:
1888           __ add(op1_lo, op2_lo, dst_lo);
1889           break;
1890 
1891         case lir_sub:
1892           __ sub(op1_lo, op2_lo, dst_lo);
1893           break;
1894 
1895         default: ShouldNotReachHere();
1896       }
1897 #else
1898       Register op1_lo = left->as_register_lo();
1899       Register op1_hi = left->as_register_hi();
1900       Register op2_lo = right->as_register_lo();
1901       Register op2_hi = right->as_register_hi();
1902       Register dst_lo = dest->as_register_lo();
1903       Register dst_hi = dest->as_register_hi();
1904 
1905       switch (code) {
1906         case lir_add:
1907           __ addcc(op1_lo, op2_lo, dst_lo);
1908           __ addc (op1_hi, op2_hi, dst_hi);
1909           break;
1910 
1911         case lir_sub:
1912           __ subcc(op1_lo, op2_lo, dst_lo);
1913           __ subc (op1_hi, op2_hi, dst_hi);
1914           break;
1915 
1916         default: ShouldNotReachHere();
1917       }
1918 #endif
1919     } else {
1920       assert (right->is_single_cpu(), "Just Checking");
1921 
1922       Register lreg = left->as_register();
1923       Register res  = dest->as_register();
1924       Register rreg = right->as_register();
1925       switch (code) {
1926         case lir_add:  __ add  (lreg, rreg, res); break;
1927         case lir_sub:  __ sub  (lreg, rreg, res); break;
1928         case lir_mul:  __ mult (lreg, rreg, res); break;
1929         default: ShouldNotReachHere();
1930       }
1931     }
1932   } else {
1933     assert (right->is_constant(), "must be constant");
1934 
1935     if (dest->is_single_cpu()) {
1936       Register lreg = left->as_register();
1937       Register res  = dest->as_register();
1938       int    simm13 = right->as_constant_ptr()->as_jint();
1939 
1940       switch (code) {
1941         case lir_add:  __ add  (lreg, simm13, res); break;
1942         case lir_sub:  __ sub  (lreg, simm13, res); break;
1943         case lir_mul:  __ mult (lreg, simm13, res); break;
1944         default: ShouldNotReachHere();
1945       }
1946     } else {
1947       Register lreg = left->as_pointer_register();
1948       Register res  = dest->as_register_lo();
1949       long con = right->as_constant_ptr()->as_jlong();
1950       assert(Assembler::is_simm13(con), "must be simm13");
1951 
1952       switch (code) {
1953         case lir_add:  __ add  (lreg, (int)con, res); break;
1954         case lir_sub:  __ sub  (lreg, (int)con, res); break;
1955         case lir_mul:  __ mult (lreg, (int)con, res); break;
1956         default: ShouldNotReachHere();
1957       }
1958     }
1959   }
1960 }
1961 
1962 
1963 void LIR_Assembler::fpop() {
1964   // do nothing
1965 }
1966 
1967 
1968 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
1969   switch (code) {
1970     case lir_sin:
1971     case lir_tan:
1972     case lir_cos: {
1973       assert(thread->is_valid(), "preserve the thread object for performance reasons");
1974       assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
1975       break;
1976     }
1977     case lir_sqrt: {
1978       assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
1979       FloatRegister src_reg = value->as_double_reg();
1980       FloatRegister dst_reg = dest->as_double_reg();
1981       __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
1982       break;
1983     }
1984     case lir_abs: {
1985       assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
1986       FloatRegister src_reg = value->as_double_reg();
1987       FloatRegister dst_reg = dest->as_double_reg();
1988       __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
1989       break;
1990     }
1991     default: {
1992       ShouldNotReachHere();
1993       break;
1994     }
1995   }
1996 }
1997 
1998 
1999 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
2000   if (right->is_constant()) {
2001     if (dest->is_single_cpu()) {
2002       int simm13 = right->as_constant_ptr()->as_jint();
2003       switch (code) {
2004         case lir_logic_and:   __ and3 (left->as_register(), simm13, dest->as_register()); break;
2005         case lir_logic_or:    __ or3  (left->as_register(), simm13, dest->as_register()); break;
2006         case lir_logic_xor:   __ xor3 (left->as_register(), simm13, dest->as_register()); break;
2007         default: ShouldNotReachHere();
2008       }
2009     } else {
2010       long c = right->as_constant_ptr()->as_jlong();
2011       assert(c == (int)c && Assembler::is_simm13(c), "out of range");
2012       int simm13 = (int)c;
2013       switch (code) {
2014         case lir_logic_and:
2015 #ifndef _LP64
2016           __ and3 (left->as_register_hi(), 0,      dest->as_register_hi());
2017 #endif
2018           __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
2019           break;
2020 
2021         case lir_logic_or:
2022 #ifndef _LP64
2023           __ or3 (left->as_register_hi(), 0,      dest->as_register_hi());
2024 #endif
2025           __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
2026           break;
2027 
2028         case lir_logic_xor:
2029 #ifndef _LP64
2030           __ xor3 (left->as_register_hi(), 0,      dest->as_register_hi());
2031 #endif
2032           __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
2033           break;
2034 
2035         default: ShouldNotReachHere();
2036       }
2037     }
2038   } else {
2039     assert(right->is_register(), "right should be in register");
2040 
2041     if (dest->is_single_cpu()) {
2042       switch (code) {
2043         case lir_logic_and:   __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
2044         case lir_logic_or:    __ or3  (left->as_register(), right->as_register(), dest->as_register()); break;
2045         case lir_logic_xor:   __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
2046         default: ShouldNotReachHere();
2047       }
2048     } else {
2049 #ifdef _LP64
2050       Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
2051                                                                         left->as_register_lo();
2052       Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
2053                                                                           right->as_register_lo();
2054 
2055       switch (code) {
2056         case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
2057         case lir_logic_or:  __ or3  (l, r, dest->as_register_lo()); break;
2058         case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
2059         default: ShouldNotReachHere();
2060       }
2061 #else
2062       switch (code) {
2063         case lir_logic_and:
2064           __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2065           __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2066           break;
2067 
2068         case lir_logic_or:
2069           __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2070           __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2071           break;
2072 
2073         case lir_logic_xor:
2074           __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2075           __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2076           break;
2077 
2078         default: ShouldNotReachHere();
2079       }
2080 #endif
2081     }
2082   }
2083 }
2084 
2085 
2086 int LIR_Assembler::shift_amount(BasicType t) {
2087   int elem_size = type2aelembytes(t);
2088   switch (elem_size) {
2089     case 1 : return 0;
2090     case 2 : return 1;
2091     case 4 : return 2;
2092     case 8 : return 3;
2093   }
2094   ShouldNotReachHere();
2095   return -1;
2096 }
2097 
2098 
2099 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
2100   assert(exceptionOop->as_register() == Oexception, "should match");
2101   assert(exceptionPC->as_register() == Oissuing_pc, "should match");
2102 
2103   info->add_register_oop(exceptionOop);
2104 
2105   // reuse the debug info from the safepoint poll for the throw op itself
2106   address pc_for_athrow  = __ pc();
2107   int pc_for_athrow_offset = __ offset();
2108   RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
2109   __ set(pc_for_athrow, Oissuing_pc, rspec);
2110   add_call_info(pc_for_athrow_offset, info); // for exception handler
2111 
2112   __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
2113   __ delayed()->nop();
2114 }
2115 
2116 
2117 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
2118   assert(exceptionOop->as_register() == Oexception, "should match");
2119 
2120   __ br(Assembler::always, false, Assembler::pt, _unwind_handler_entry);
2121   __ delayed()->nop();
2122 }
2123 
2124 
2125 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2126   Register src = op->src()->as_register();
2127   Register dst = op->dst()->as_register();
2128   Register src_pos = op->src_pos()->as_register();
2129   Register dst_pos = op->dst_pos()->as_register();
2130   Register length  = op->length()->as_register();
2131   Register tmp = op->tmp()->as_register();
2132   Register tmp2 = O7;
2133 
2134   int flags = op->flags();
2135   ciArrayKlass* default_type = op->expected_type();
2136   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2137   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2138 
2139   // set up the arraycopy stub information
2140   ArrayCopyStub* stub = op->stub();
2141 
2142   // always do stub if no type information is available.  it's ok if
2143   // the known type isn't loaded since the code sanity checks
2144   // in debug mode and the type isn't required when we know the exact type
2145   // also check that the type is an array type.
2146   // We also, for now, always call the stub if the barrier set requires a
2147   // write_ref_pre barrier (which the stub does, but none of the optimized
2148   // cases currently does).
2149   if (op->expected_type() == NULL ||
2150       Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) {
2151     __ mov(src,     O0);
2152     __ mov(src_pos, O1);
2153     __ mov(dst,     O2);
2154     __ mov(dst_pos, O3);
2155     __ mov(length,  O4);
2156     __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
2157 
2158     __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
2159     __ delayed()->nop();
2160     __ bind(*stub->continuation());
2161     return;
2162   }
2163 
2164   assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
2165 
2166   // make sure src and dst are non-null and load array length
2167   if (flags & LIR_OpArrayCopy::src_null_check) {
2168     __ tst(src);
2169     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2170     __ delayed()->nop();
2171   }
2172 
2173   if (flags & LIR_OpArrayCopy::dst_null_check) {
2174     __ tst(dst);
2175     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2176     __ delayed()->nop();
2177   }
2178 
2179   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2180     // test src_pos register
2181     __ tst(src_pos);
2182     __ br(Assembler::less, false, Assembler::pn, *stub->entry());
2183     __ delayed()->nop();
2184   }
2185 
2186   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2187     // test dst_pos register
2188     __ tst(dst_pos);
2189     __ br(Assembler::less, false, Assembler::pn, *stub->entry());
2190     __ delayed()->nop();
2191   }
2192 
2193   if (flags & LIR_OpArrayCopy::length_positive_check) {
2194     // make sure length isn't negative
2195     __ tst(length);
2196     __ br(Assembler::less, false, Assembler::pn, *stub->entry());
2197     __ delayed()->nop();
2198   }
2199 
2200   if (flags & LIR_OpArrayCopy::src_range_check) {
2201     __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
2202     __ add(length, src_pos, tmp);
2203     __ cmp(tmp2, tmp);
2204     __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2205     __ delayed()->nop();
2206   }
2207 
2208   if (flags & LIR_OpArrayCopy::dst_range_check) {
2209     __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
2210     __ add(length, dst_pos, tmp);
2211     __ cmp(tmp2, tmp);
2212     __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2213     __ delayed()->nop();
2214   }
2215 
2216   if (flags & LIR_OpArrayCopy::type_check) {
2217     __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
2218     __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2219     __ cmp(tmp, tmp2);
2220     __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2221     __ delayed()->nop();
2222   }
2223 
2224 #ifdef ASSERT
2225   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2226     // Sanity check the known type with the incoming class.  For the
2227     // primitive case the types must match exactly with src.klass and
2228     // dst.klass each exactly matching the default type.  For the
2229     // object array case, if no type check is needed then either the
2230     // dst type is exactly the expected type and the src type is a
2231     // subtype which we can't check or src is the same array as dst
2232     // but not necessarily exactly of type default_type.
2233     Label known_ok, halt;
2234     jobject2reg(op->expected_type()->constant_encoding(), tmp);
2235     __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2236     if (basic_type != T_OBJECT) {
2237       __ cmp(tmp, tmp2);
2238       __ br(Assembler::notEqual, false, Assembler::pn, halt);
2239       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
2240       __ cmp(tmp, tmp2);
2241       __ br(Assembler::equal, false, Assembler::pn, known_ok);
2242       __ delayed()->nop();
2243     } else {
2244       __ cmp(tmp, tmp2);
2245       __ br(Assembler::equal, false, Assembler::pn, known_ok);
2246       __ delayed()->cmp(src, dst);
2247       __ br(Assembler::equal, false, Assembler::pn, known_ok);
2248       __ delayed()->nop();
2249     }
2250     __ bind(halt);
2251     __ stop("incorrect type information in arraycopy");
2252     __ bind(known_ok);
2253   }
2254 #endif
2255 
2256   int shift = shift_amount(basic_type);
2257 
2258   Register src_ptr = O0;
2259   Register dst_ptr = O1;
2260   Register len     = O2;
2261 
2262   __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2263   LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null
2264   if (shift == 0) {
2265     __ add(src_ptr, src_pos, src_ptr);
2266   } else {
2267     __ sll(src_pos, shift, tmp);
2268     __ add(src_ptr, tmp, src_ptr);
2269   }
2270 
2271   __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2272   LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null
2273   if (shift == 0) {
2274     __ add(dst_ptr, dst_pos, dst_ptr);
2275   } else {
2276     __ sll(dst_pos, shift, tmp);
2277     __ add(dst_ptr, tmp, dst_ptr);
2278   }
2279 
2280   if (basic_type != T_OBJECT) {
2281     if (shift == 0) {
2282       __ mov(length, len);
2283     } else {
2284       __ sll(length, shift, len);
2285     }
2286     __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy));
2287   } else {
2288     // oop_arraycopy takes a length in number of elements, so don't scale it.
2289     __ mov(length, len);
2290     __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy));
2291   }
2292 
2293   __ bind(*stub->continuation());
2294 }
2295 
2296 
2297 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2298   if (dest->is_single_cpu()) {
2299 #ifdef _LP64
2300     if (left->type() == T_OBJECT) {
2301       switch (code) {
2302         case lir_shl:  __ sllx  (left->as_register(), count->as_register(), dest->as_register()); break;
2303         case lir_shr:  __ srax  (left->as_register(), count->as_register(), dest->as_register()); break;
2304         case lir_ushr: __ srl   (left->as_register(), count->as_register(), dest->as_register()); break;
2305         default: ShouldNotReachHere();
2306       }
2307     } else
2308 #endif
2309       switch (code) {
2310         case lir_shl:  __ sll   (left->as_register(), count->as_register(), dest->as_register()); break;
2311         case lir_shr:  __ sra   (left->as_register(), count->as_register(), dest->as_register()); break;
2312         case lir_ushr: __ srl   (left->as_register(), count->as_register(), dest->as_register()); break;
2313         default: ShouldNotReachHere();
2314       }
2315   } else {
2316 #ifdef _LP64
2317     switch (code) {
2318       case lir_shl:  __ sllx  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2319       case lir_shr:  __ srax  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2320       case lir_ushr: __ srlx  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2321       default: ShouldNotReachHere();
2322     }
2323 #else
2324     switch (code) {
2325       case lir_shl:  __ lshl  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2326       case lir_shr:  __ lshr  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2327       case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2328       default: ShouldNotReachHere();
2329     }
2330 #endif
2331   }
2332 }
2333 
2334 
2335 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2336 #ifdef _LP64
2337   if (left->type() == T_OBJECT) {
2338     count = count & 63;  // shouldn't shift by more than sizeof(intptr_t)
2339     Register l = left->as_register();
2340     Register d = dest->as_register_lo();
2341     switch (code) {
2342       case lir_shl:  __ sllx  (l, count, d); break;
2343       case lir_shr:  __ srax  (l, count, d); break;
2344       case lir_ushr: __ srlx  (l, count, d); break;
2345       default: ShouldNotReachHere();
2346     }
2347     return;
2348   }
2349 #endif
2350 
2351   if (dest->is_single_cpu()) {
2352     count = count & 0x1F; // Java spec
2353     switch (code) {
2354       case lir_shl:  __ sll   (left->as_register(), count, dest->as_register()); break;
2355       case lir_shr:  __ sra   (left->as_register(), count, dest->as_register()); break;
2356       case lir_ushr: __ srl   (left->as_register(), count, dest->as_register()); break;
2357       default: ShouldNotReachHere();
2358     }
2359   } else if (dest->is_double_cpu()) {
2360     count = count & 63; // Java spec
2361     switch (code) {
2362       case lir_shl:  __ sllx  (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2363       case lir_shr:  __ srax  (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2364       case lir_ushr: __ srlx  (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2365       default: ShouldNotReachHere();
2366     }
2367   } else {
2368     ShouldNotReachHere();
2369   }
2370 }
2371 
2372 
2373 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2374   assert(op->tmp1()->as_register()  == G1 &&
2375          op->tmp2()->as_register()  == G3 &&
2376          op->tmp3()->as_register()  == G4 &&
2377          op->obj()->as_register()   == O0 &&
2378          op->klass()->as_register() == G5, "must be");
2379   if (op->init_check()) {
2380     __ ld(op->klass()->as_register(),
2381           instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc),
2382           op->tmp1()->as_register());
2383     add_debug_info_for_null_check_here(op->stub()->info());
2384     __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
2385     __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
2386     __ delayed()->nop();
2387   }
2388   __ allocate_object(op->obj()->as_register(),
2389                      op->tmp1()->as_register(),
2390                      op->tmp2()->as_register(),
2391                      op->tmp3()->as_register(),
2392                      op->header_size(),
2393                      op->object_size(),
2394                      op->klass()->as_register(),
2395                      *op->stub()->entry());
2396   __ bind(*op->stub()->continuation());
2397   __ verify_oop(op->obj()->as_register());
2398 }
2399 
2400 
2401 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2402   assert(op->tmp1()->as_register()  == G1 &&
2403          op->tmp2()->as_register()  == G3 &&
2404          op->tmp3()->as_register()  == G4 &&
2405          op->tmp4()->as_register()  == O1 &&
2406          op->klass()->as_register() == G5, "must be");
2407   if (UseSlowPath ||
2408       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
2409       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
2410     __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2411     __ delayed()->nop();
2412   } else {
2413     __ allocate_array(op->obj()->as_register(),
2414                       op->len()->as_register(),
2415                       op->tmp1()->as_register(),
2416                       op->tmp2()->as_register(),
2417                       op->tmp3()->as_register(),
2418                       arrayOopDesc::header_size(op->type()),
2419                       type2aelembytes(op->type()),
2420                       op->klass()->as_register(),
2421                       *op->stub()->entry());
2422   }
2423   __ bind(*op->stub()->continuation());
2424 }
2425 
2426 
2427 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2428   LIR_Code code = op->code();
2429   if (code == lir_store_check) {
2430     Register value = op->object()->as_register();
2431     Register array = op->array()->as_register();
2432     Register k_RInfo = op->tmp1()->as_register();
2433     Register klass_RInfo = op->tmp2()->as_register();
2434     Register Rtmp1 = op->tmp3()->as_register();
2435 
2436     __ verify_oop(value);
2437 
2438     CodeStub* stub = op->stub();
2439     Label done;
2440     __ cmp(value, 0);
2441     __ br(Assembler::equal, false, Assembler::pn, done);
2442     __ delayed()->nop();
2443     load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
2444     load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
2445 
2446     // get instance klass
2447     load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
2448     // perform the fast part of the checking logic
2449     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, &done, stub->entry(), NULL);
2450 
2451     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2452     assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2453     __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2454     __ delayed()->nop();
2455     __ cmp(G3, 0);
2456     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2457     __ delayed()->nop();
2458     __ bind(done);
2459   } else if (op->code() == lir_checkcast) {
2460     // we always need a stub for the failure case.
2461     CodeStub* stub = op->stub();
2462     Register obj = op->object()->as_register();
2463     Register k_RInfo = op->tmp1()->as_register();
2464     Register klass_RInfo = op->tmp2()->as_register();
2465     Register dst = op->result_opr()->as_register();
2466     Register Rtmp1 = op->tmp3()->as_register();
2467     ciKlass* k = op->klass();
2468 
2469     if (obj == k_RInfo) {
2470       k_RInfo = klass_RInfo;
2471       klass_RInfo = obj;
2472     }
2473     if (op->profiled_method() != NULL) {
2474       ciMethod* method = op->profiled_method();
2475       int bci          = op->profiled_bci();
2476 
2477       // We need two temporaries to perform this operation on SPARC,
2478       // so to keep things simple we perform a redundant test here
2479       Label profile_done;
2480       __ cmp(obj, 0);
2481       __ br(Assembler::notEqual, false, Assembler::pn, profile_done);
2482       __ delayed()->nop();
2483       // Object is null; update methodDataOop
2484       ciMethodData* md = method->method_data();
2485       if (md == NULL) {
2486         bailout("out of memory building methodDataOop");
2487         return;
2488       }
2489       ciProfileData* data = md->bci_to_data(bci);
2490       assert(data != NULL,       "need data for checkcast");
2491       assert(data->is_BitData(), "need BitData for checkcast");
2492       Register mdo      = k_RInfo;
2493       Register data_val = Rtmp1;
2494       jobject2reg(md->constant_encoding(), mdo);
2495 
2496       int mdo_offset_bias = 0;
2497       if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2498         // The offset is large so bias the mdo by the base of the slot so
2499         // that the ld can use simm13s to reference the slots of the data
2500         mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2501         __ set(mdo_offset_bias, data_val);
2502         __ add(mdo, data_val, mdo);
2503       }
2504 
2505 
2506       Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2507       __ ldub(flags_addr, data_val);
2508       __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2509       __ stb(data_val, flags_addr);
2510       __ bind(profile_done);
2511     }
2512 
2513     Label done;
2514     // patching may screw with our temporaries on sparc,
2515     // so let's do it before loading the class
2516     if (k->is_loaded()) {
2517       jobject2reg(k->constant_encoding(), k_RInfo);
2518     } else {
2519       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
2520     }
2521     assert(obj != k_RInfo, "must be different");
2522     __ cmp(obj, 0);
2523     __ br(Assembler::equal, false, Assembler::pn, done);
2524     __ delayed()->nop();
2525 
2526     // get object class
2527     // not a safepoint as obj null check happens earlier
2528     load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
2529     if (op->fast_check()) {
2530       assert_different_registers(klass_RInfo, k_RInfo);
2531       __ cmp(k_RInfo, klass_RInfo);
2532       __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2533       __ delayed()->nop();
2534       __ bind(done);
2535     } else {
2536       bool need_slow_path = true;
2537       if (k->is_loaded()) {
2538         if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
2539           need_slow_path = false;
2540         // perform the fast part of the checking logic
2541         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
2542                                          (need_slow_path ? &done : NULL),
2543                                          stub->entry(), NULL,
2544                                          RegisterOrConstant(k->super_check_offset()));
2545       } else {
2546         // perform the fast part of the checking logic
2547         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7,
2548                                          &done, stub->entry(), NULL);
2549       }
2550       if (need_slow_path) {
2551         // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2552         assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2553         __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2554         __ delayed()->nop();
2555         __ cmp(G3, 0);
2556         __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2557         __ delayed()->nop();
2558       }
2559       __ bind(done);
2560     }
2561     __ mov(obj, dst);
2562   } else if (code == lir_instanceof) {
2563     Register obj = op->object()->as_register();
2564     Register k_RInfo = op->tmp1()->as_register();
2565     Register klass_RInfo = op->tmp2()->as_register();
2566     Register dst = op->result_opr()->as_register();
2567     Register Rtmp1 = op->tmp3()->as_register();
2568     ciKlass* k = op->klass();
2569 
2570     Label done;
2571     if (obj == k_RInfo) {
2572       k_RInfo = klass_RInfo;
2573       klass_RInfo = obj;
2574     }
2575     // patching may screw with our temporaries on sparc,
2576     // so let's do it before loading the class
2577     if (k->is_loaded()) {
2578       jobject2reg(k->constant_encoding(), k_RInfo);
2579     } else {
2580       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
2581     }
2582     assert(obj != k_RInfo, "must be different");
2583     __ cmp(obj, 0);
2584     __ br(Assembler::equal, true, Assembler::pn, done);
2585     __ delayed()->set(0, dst);
2586 
2587     // get object class
2588     // not a safepoint as obj null check happens earlier
2589     load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
2590     if (op->fast_check()) {
2591       __ cmp(k_RInfo, klass_RInfo);
2592       __ br(Assembler::equal, true, Assembler::pt, done);
2593       __ delayed()->set(1, dst);
2594       __ set(0, dst);
2595       __ bind(done);
2596     } else {
2597       bool need_slow_path = true;
2598       if (k->is_loaded()) {
2599         if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
2600           need_slow_path = false;
2601         // perform the fast part of the checking logic
2602         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, noreg,
2603                                          (need_slow_path ? &done : NULL),
2604                                          (need_slow_path ? &done : NULL), NULL,
2605                                          RegisterOrConstant(k->super_check_offset()),
2606                                          dst);
2607       } else {
2608         assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
2609         // perform the fast part of the checking logic
2610         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, dst,
2611                                          &done, &done, NULL,
2612                                          RegisterOrConstant(-1),
2613                                          dst);
2614       }
2615       if (need_slow_path) {
2616         // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2617         assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2618         __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2619         __ delayed()->nop();
2620         __ mov(G3, dst);
2621       }
2622       __ bind(done);
2623     }
2624   } else {
2625     ShouldNotReachHere();
2626   }
2627 
2628 }
2629 
2630 
2631 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2632   if (op->code() == lir_cas_long) {
2633     assert(VM_Version::supports_cx8(), "wrong machine");
2634     Register addr = op->addr()->as_pointer_register();
2635     Register cmp_value_lo = op->cmp_value()->as_register_lo();
2636     Register cmp_value_hi = op->cmp_value()->as_register_hi();
2637     Register new_value_lo = op->new_value()->as_register_lo();
2638     Register new_value_hi = op->new_value()->as_register_hi();
2639     Register t1 = op->tmp1()->as_register();
2640     Register t2 = op->tmp2()->as_register();
2641 #ifdef _LP64
2642     __ mov(cmp_value_lo, t1);
2643     __ mov(new_value_lo, t2);
2644 #else
2645     // move high and low halves of long values into single registers
2646     __ sllx(cmp_value_hi, 32, t1);         // shift high half into temp reg
2647     __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
2648     __ or3(t1, cmp_value_lo, t1);          // t1 holds 64-bit compare value
2649     __ sllx(new_value_hi, 32, t2);
2650     __ srl(new_value_lo, 0, new_value_lo);
2651     __ or3(t2, new_value_lo, t2);          // t2 holds 64-bit value to swap
2652 #endif
2653     // perform the compare and swap operation
2654     __ casx(addr, t1, t2);
2655     // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2656     // overwritten with the original value in "addr" and will be equal to t1.
2657     __ cmp(t1, t2);
2658 
2659   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2660     Register addr = op->addr()->as_pointer_register();
2661     Register cmp_value = op->cmp_value()->as_register();
2662     Register new_value = op->new_value()->as_register();
2663     Register t1 = op->tmp1()->as_register();
2664     Register t2 = op->tmp2()->as_register();
2665     __ mov(cmp_value, t1);
2666     __ mov(new_value, t2);
2667 #ifdef _LP64
2668     if (op->code() == lir_cas_obj) {
2669       __ casx(addr, t1, t2);
2670     } else
2671 #endif
2672       {
2673         __ cas(addr, t1, t2);
2674       }
2675     __ cmp(t1, t2);
2676   } else {
2677     Unimplemented();
2678   }
2679 }
2680 
2681 void LIR_Assembler::set_24bit_FPU() {
2682   Unimplemented();
2683 }
2684 
2685 
2686 void LIR_Assembler::reset_FPU() {
2687   Unimplemented();
2688 }
2689 
2690 
2691 void LIR_Assembler::breakpoint() {
2692   __ breakpoint_trap();
2693 }
2694 
2695 
2696 void LIR_Assembler::push(LIR_Opr opr) {
2697   Unimplemented();
2698 }
2699 
2700 
2701 void LIR_Assembler::pop(LIR_Opr opr) {
2702   Unimplemented();
2703 }
2704 
2705 
2706 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2707   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2708   Register dst = dst_opr->as_register();
2709   Register reg = mon_addr.base();
2710   int offset = mon_addr.disp();
2711   // compute pointer to BasicLock
2712   if (mon_addr.is_simm13()) {
2713     __ add(reg, offset, dst);
2714   } else {
2715     __ set(offset, dst);
2716     __ add(dst, reg, dst);
2717   }
2718 }
2719 
2720 
2721 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2722   Register obj = op->obj_opr()->as_register();
2723   Register hdr = op->hdr_opr()->as_register();
2724   Register lock = op->lock_opr()->as_register();
2725 
2726   // obj may not be an oop
2727   if (op->code() == lir_lock) {
2728     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2729     if (UseFastLocking) {
2730       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2731       // add debug info for NullPointerException only if one is possible
2732       if (op->info() != NULL) {
2733         add_debug_info_for_null_check_here(op->info());
2734       }
2735       __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2736     } else {
2737       // always do slow locking
2738       // note: the slow locking code could be inlined here, however if we use
2739       //       slow locking, speed doesn't matter anyway and this solution is
2740       //       simpler and requires less duplicated code - additionally, the
2741       //       slow locking code is the same in either case which simplifies
2742       //       debugging
2743       __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2744       __ delayed()->nop();
2745     }
2746   } else {
2747     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2748     if (UseFastLocking) {
2749       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2750       __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2751     } else {
2752       // always do slow unlocking
2753       // note: the slow unlocking code could be inlined here, however if we use
2754       //       slow unlocking, speed doesn't matter anyway and this solution is
2755       //       simpler and requires less duplicated code - additionally, the
2756       //       slow unlocking code is the same in either case which simplifies
2757       //       debugging
2758       __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2759       __ delayed()->nop();
2760     }
2761   }
2762   __ bind(*op->stub()->continuation());
2763 }
2764 
2765 
2766 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2767   ciMethod* method = op->profiled_method();
2768   int bci          = op->profiled_bci();
2769 
2770   // Update counter for all call types
2771   ciMethodData* md = method->method_data();
2772   if (md == NULL) {
2773     bailout("out of memory building methodDataOop");
2774     return;
2775   }
2776   ciProfileData* data = md->bci_to_data(bci);
2777   assert(data->is_CounterData(), "need CounterData for calls");
2778   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2779   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
2780   Register mdo  = op->mdo()->as_register();
2781   Register tmp1 = op->tmp1()->as_register();
2782   jobject2reg(md->constant_encoding(), mdo);
2783   int mdo_offset_bias = 0;
2784   if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
2785                             data->size_in_bytes())) {
2786     // The offset is large so bias the mdo by the base of the slot so
2787     // that the ld can use simm13s to reference the slots of the data
2788     mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2789     __ set(mdo_offset_bias, O7);
2790     __ add(mdo, O7, mdo);
2791   }
2792 
2793   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2794   Bytecodes::Code bc = method->java_code_at_bci(bci);
2795   // Perform additional virtual call profiling for invokevirtual and
2796   // invokeinterface bytecodes
2797   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
2798       Tier1ProfileVirtualCalls) {
2799     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2800     Register recv = op->recv()->as_register();
2801     assert_different_registers(mdo, tmp1, recv);
2802     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2803     ciKlass* known_klass = op->known_holder();
2804     if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
2805       // We know the type that will be seen at this call site; we can
2806       // statically update the methodDataOop rather than needing to do
2807       // dynamic tests on the receiver type
2808 
2809       // NOTE: we should probably put a lock around this search to
2810       // avoid collisions by concurrent compilations
2811       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2812       uint i;
2813       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2814         ciKlass* receiver = vc_data->receiver(i);
2815         if (known_klass->equals(receiver)) {
2816           Address data_addr(mdo, md->byte_offset_of_slot(data,
2817                                                          VirtualCallData::receiver_count_offset(i)) -
2818                             mdo_offset_bias);
2819           __ lduw(data_addr, tmp1);
2820           __ add(tmp1, DataLayout::counter_increment, tmp1);
2821           __ stw(tmp1, data_addr);
2822           return;
2823         }
2824       }
2825 
2826       // Receiver type not found in profile data; select an empty slot
2827 
2828       // Note that this is less efficient than it should be because it
2829       // always does a write to the receiver part of the
2830       // VirtualCallData rather than just the first time
2831       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2832         ciKlass* receiver = vc_data->receiver(i);
2833         if (receiver == NULL) {
2834           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
2835                             mdo_offset_bias);
2836           jobject2reg(known_klass->constant_encoding(), tmp1);
2837           __ st_ptr(tmp1, recv_addr);
2838           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
2839                             mdo_offset_bias);
2840           __ lduw(data_addr, tmp1);
2841           __ add(tmp1, DataLayout::counter_increment, tmp1);
2842           __ stw(tmp1, data_addr);
2843           return;
2844         }
2845       }
2846     } else {
2847       load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
2848       Label update_done;
2849       uint i;
2850       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2851         Label next_test;
2852         // See if the receiver is receiver[n].
2853         Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
2854                               mdo_offset_bias);
2855         __ ld_ptr(receiver_addr, tmp1);
2856         __ verify_oop(tmp1);
2857         __ cmp(recv, tmp1);
2858         __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
2859         __ delayed()->nop();
2860         Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
2861                           mdo_offset_bias);
2862         __ lduw(data_addr, tmp1);
2863         __ add(tmp1, DataLayout::counter_increment, tmp1);
2864         __ stw(tmp1, data_addr);
2865         __ br(Assembler::always, false, Assembler::pt, update_done);
2866         __ delayed()->nop();
2867         __ bind(next_test);
2868       }
2869 
2870       // Didn't find receiver; find next empty slot and fill it in
2871       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2872         Label next_test;
2873         Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
2874                           mdo_offset_bias);
2875         load(recv_addr, tmp1, T_OBJECT);
2876         __ tst(tmp1);
2877         __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
2878         __ delayed()->nop();
2879         __ st_ptr(recv, recv_addr);
2880         __ set(DataLayout::counter_increment, tmp1);
2881         __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
2882                   mdo_offset_bias);
2883         __ br(Assembler::always, false, Assembler::pt, update_done);
2884         __ delayed()->nop();
2885         __ bind(next_test);
2886       }
2887       // Receiver did not match any saved receiver and there is no empty row for it.
2888       // Increment total counter to indicate polymorphic case.
2889       __ lduw(counter_addr, tmp1);
2890       __ add(tmp1, DataLayout::counter_increment, tmp1);
2891       __ stw(tmp1, counter_addr);
2892 
2893       __ bind(update_done);
2894     }
2895   } else {
2896     // Static call
2897     __ lduw(counter_addr, tmp1);
2898     __ add(tmp1, DataLayout::counter_increment, tmp1);
2899     __ stw(tmp1, counter_addr);
2900   }
2901 }
2902 
2903 
2904 void LIR_Assembler::align_backward_branch_target() {
2905   __ align(OptoLoopAlignment);
2906 }
2907 
2908 
2909 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
2910   // make sure we are expecting a delay
2911   // this has the side effect of clearing the delay state
2912   // so we can use _masm instead of _masm->delayed() to do the
2913   // code generation.
2914   __ delayed();
2915 
2916   // make sure we only emit one instruction
2917   int offset = code_offset();
2918   op->delay_op()->emit_code(this);
2919 #ifdef ASSERT
2920   if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
2921     op->delay_op()->print();
2922   }
2923   assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
2924          "only one instruction can go in a delay slot");
2925 #endif
2926 
2927   // we may also be emitting the call info for the instruction
2928   // which we are the delay slot of.
2929   CodeEmitInfo* call_info = op->call_info();
2930   if (call_info) {
2931     add_call_info(code_offset(), call_info);
2932   }
2933 
2934   if (VerifyStackAtCalls) {
2935     _masm->sub(FP, SP, O7);
2936     _masm->cmp(O7, initial_frame_size_in_bytes());
2937     _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2 );
2938   }
2939 }
2940 
2941 
2942 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
2943   assert(left->is_register(), "can only handle registers");
2944 
2945   if (left->is_single_cpu()) {
2946     __ neg(left->as_register(), dest->as_register());
2947   } else if (left->is_single_fpu()) {
2948     __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
2949   } else if (left->is_double_fpu()) {
2950     __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
2951   } else {
2952     assert (left->is_double_cpu(), "Must be a long");
2953     Register Rlow = left->as_register_lo();
2954     Register Rhi = left->as_register_hi();
2955 #ifdef _LP64
2956     __ sub(G0, Rlow, dest->as_register_lo());
2957 #else
2958     __ subcc(G0, Rlow, dest->as_register_lo());
2959     __ subc (G0, Rhi,  dest->as_register_hi());
2960 #endif
2961   }
2962 }
2963 
2964 
2965 void LIR_Assembler::fxch(int i) {
2966   Unimplemented();
2967 }
2968 
2969 void LIR_Assembler::fld(int i) {
2970   Unimplemented();
2971 }
2972 
2973 void LIR_Assembler::ffree(int i) {
2974   Unimplemented();
2975 }
2976 
2977 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
2978                             const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2979 
2980   // if tmp is invalid, then the function being called doesn't destroy the thread
2981   if (tmp->is_valid()) {
2982     __ save_thread(tmp->as_register());
2983   }
2984   __ call(dest, relocInfo::runtime_call_type);
2985   __ delayed()->nop();
2986   if (info != NULL) {
2987     add_call_info_here(info);
2988   }
2989   if (tmp->is_valid()) {
2990     __ restore_thread(tmp->as_register());
2991   }
2992 
2993 #ifdef ASSERT
2994   __ verify_thread();
2995 #endif // ASSERT
2996 }
2997 
2998 
2999 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3000 #ifdef _LP64
3001   ShouldNotReachHere();
3002 #endif
3003 
3004   NEEDS_CLEANUP;
3005   if (type == T_LONG) {
3006     LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();
3007 
3008     // (extended to allow indexed as well as constant displaced for JSR-166)
3009     Register idx = noreg; // contains either constant offset or index
3010 
3011     int disp = mem_addr->disp();
3012     if (mem_addr->index() == LIR_OprFact::illegalOpr) {
3013       if (!Assembler::is_simm13(disp)) {
3014         idx = O7;
3015         __ set(disp, idx);
3016       }
3017     } else {
3018       assert(disp == 0, "not both indexed and disp");
3019       idx = mem_addr->index()->as_register();
3020     }
3021 
3022     int null_check_offset = -1;
3023 
3024     Register base = mem_addr->base()->as_register();
3025     if (src->is_register() && dest->is_address()) {
3026       // G4 is high half, G5 is low half
3027       if (VM_Version::v9_instructions_work()) {
3028         // clear the top bits of G5, and scale up G4
3029         __ srl (src->as_register_lo(),  0, G5);
3030         __ sllx(src->as_register_hi(), 32, G4);
3031         // combine the two halves into the 64 bits of G4
3032         __ or3(G4, G5, G4);
3033         null_check_offset = __ offset();
3034         if (idx == noreg) {
3035           __ stx(G4, base, disp);
3036         } else {
3037           __ stx(G4, base, idx);
3038         }
3039       } else {
3040         __ mov (src->as_register_hi(), G4);
3041         __ mov (src->as_register_lo(), G5);
3042         null_check_offset = __ offset();
3043         if (idx == noreg) {
3044           __ std(G4, base, disp);
3045         } else {
3046           __ std(G4, base, idx);
3047         }
3048       }
3049     } else if (src->is_address() && dest->is_register()) {
3050       null_check_offset = __ offset();
3051       if (VM_Version::v9_instructions_work()) {
3052         if (idx == noreg) {
3053           __ ldx(base, disp, G5);
3054         } else {
3055           __ ldx(base, idx, G5);
3056         }
3057         __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
3058         __ mov (G5, dest->as_register_lo());     // copy low half into lo
3059       } else {
3060         if (idx == noreg) {
3061           __ ldd(base, disp, G4);
3062         } else {
3063           __ ldd(base, idx, G4);
3064         }
3065         // G4 is high half, G5 is low half
3066         __ mov (G4, dest->as_register_hi());
3067         __ mov (G5, dest->as_register_lo());
3068       }
3069     } else {
3070       Unimplemented();
3071     }
3072     if (info != NULL) {
3073       add_debug_info_for_null_check(null_check_offset, info);
3074     }
3075 
3076   } else {
3077     // use normal move for all other volatiles since they don't need
3078     // special handling to remain atomic.
3079     move_op(src, dest, type, lir_patch_none, info, false, false);
3080   }
3081 }
3082 
3083 void LIR_Assembler::membar() {
3084   // only StoreLoad membars are ever explicitly needed on sparcs in TSO mode
3085   __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3086 }
3087 
3088 void LIR_Assembler::membar_acquire() {
3089   // no-op on TSO
3090 }
3091 
3092 void LIR_Assembler::membar_release() {
3093   // no-op on TSO
3094 }
3095 
3096 // Macro to Pack two sequential registers containing 32 bit values
3097 // into a single 64 bit register.
3098 // rs and rs->successor() are packed into rd
3099 // rd and rs may be the same register.
3100 // Note: rs and rs->successor() are destroyed.
3101 void LIR_Assembler::pack64( Register rs, Register rd ) {
3102   __ sllx(rs, 32, rs);
3103   __ srl(rs->successor(), 0, rs->successor());
3104   __ or3(rs, rs->successor(), rd);
3105 }
3106 
3107 // Macro to unpack a 64 bit value in a register into
3108 // two sequential registers.
3109 // rd is unpacked into rd and rd->successor()
3110 void LIR_Assembler::unpack64( Register rd ) {
3111   __ mov(rd, rd->successor());
3112   __ srax(rd, 32, rd);
3113   __ sra(rd->successor(), 0, rd->successor());
3114 }
3115 
3116 
3117 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
3118   LIR_Address* addr = addr_opr->as_address_ptr();
3119   assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
3120   __ add(addr->base()->as_register(), addr->disp(), dest->as_register());
3121 }
3122 
3123 
3124 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3125   assert(result_reg->is_register(), "check");
3126   __ mov(G2_thread, result_reg->as_register());
3127 }
3128 
3129 
3130 void LIR_Assembler::peephole(LIR_List* lir) {
3131   LIR_OpList* inst = lir->instructions_list();
3132   for (int i = 0; i < inst->length(); i++) {
3133     LIR_Op* op = inst->at(i);
3134     switch (op->code()) {
3135       case lir_cond_float_branch:
3136       case lir_branch: {
3137         LIR_OpBranch* branch = op->as_OpBranch();
3138         assert(branch->info() == NULL, "shouldn't be state on branches anymore");
3139         LIR_Op* delay_op = NULL;
3140         // we'd like to be able to pull following instructions into
3141         // this slot but we don't know enough to do it safely yet so
3142         // only optimize block to block control flow.
3143         if (LIRFillDelaySlots && branch->block()) {
3144           LIR_Op* prev = inst->at(i - 1);
3145           if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
3146             // swap previous instruction into delay slot
3147             inst->at_put(i - 1, op);
3148             inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3149 #ifndef PRODUCT
3150             if (LIRTracePeephole) {
3151               tty->print_cr("delayed");
3152               inst->at(i - 1)->print();
3153               inst->at(i)->print();
3154               tty->cr();
3155             }
3156 #endif
3157             continue;
3158           }
3159         }
3160 
3161         if (!delay_op) {
3162           delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
3163         }
3164         inst->insert_before(i + 1, delay_op);
3165         break;
3166       }
3167       case lir_static_call:
3168       case lir_virtual_call:
3169       case lir_icvirtual_call:
3170       case lir_optvirtual_call:
3171       case lir_dynamic_call: {
3172         LIR_Op* prev = inst->at(i - 1);
3173         if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
3174             (op->code() != lir_virtual_call ||
3175              !prev->result_opr()->is_single_cpu() ||
3176              prev->result_opr()->as_register() != O0) &&
3177             LIR_Assembler::is_single_instruction(prev)) {
3178           // Only moves without info can be put into the delay slot.
3179           // Also don't allow the setup of the receiver in the delay
3180           // slot for vtable calls.
3181           inst->at_put(i - 1, op);
3182           inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3183 #ifndef PRODUCT
3184           if (LIRTracePeephole) {
3185             tty->print_cr("delayed");
3186             inst->at(i - 1)->print();
3187             inst->at(i)->print();
3188             tty->cr();
3189           }
3190 #endif
3191           continue;
3192         }
3193 
3194         LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
3195         inst->insert_before(i + 1, delay_op);
3196         break;
3197       }
3198     }
3199   }
3200 }
3201 
3202 
3203 
3204 
3205 #undef __