1 /*
   2  * Copyright 2000-2010 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 # include "incls/_precompiled.incl"
  26 # include "incls/_c1_LIRAssembler_sparc.cpp.incl"
  27 
  28 #define __ _masm->
  29 
  30 
  31 //------------------------------------------------------------
  32 
  33 
  34 bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  35   if (opr->is_constant()) {
  36     LIR_Const* constant = opr->as_constant_ptr();
  37     switch (constant->type()) {
  38       case T_INT: {
  39         jint value = constant->as_jint();
  40         return Assembler::is_simm13(value);
  41       }
  42 
  43       default:
  44         return false;
  45     }
  46   }
  47   return false;
  48 }
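
// Note on simm13: SPARC arithmetic immediates are signed 13-bit values, i.e.
// the range [-4096, 4095].  For example, 4095 encodes directly in an add/ld,
// while 4096 must first be materialized with a sethi/or pair.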
  49 
  50 
  51 bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
  52   switch (op->code()) {
  53     case lir_null_check:
  54       return true;
  55 
  56 
  57     case lir_add:
  58     case lir_ushr:
  59     case lir_shr:
  60     case lir_shl:
  61       // integer shifts and adds are always one instruction
  62       return op->result_opr()->is_single_cpu();
  63 
  64 
  65     case lir_move: {
  66       LIR_Op1* op1 = op->as_Op1();
  67       LIR_Opr src = op1->in_opr();
  68       LIR_Opr dst = op1->result_opr();
  69 
  70       if (src == dst) {
  71         NEEDS_CLEANUP;
  72         // this works around a problem where a move with the same src and dst
  73         // ends up in the delay slot and the assembler then swallows the mov
  74         // since it has no effect, and afterwards complains because the delay
  75         // slot is empty.  Returning false stops the optimizer from putting
  76         // this move in the delay slot.
  77         return false;
  78       }
  79 
  80       // don't put moves involving oops into the delay slot since the VerifyOops code
  81       // will make it much larger than a single instruction.
  82       if (VerifyOops) {
  83         return false;
  84       }
  85 
  86       if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
  87           ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
  88         return false;
  89       }
  90 
  91       if (dst->is_register()) {
  92         if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
  93           return !PatchALot;
  94         } else if (src->is_single_stack()) {
  95           return true;
  96         }
  97       }
  98 
  99       if (src->is_register()) {
 100         if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
 101           return !PatchALot;
 102         } else if (dst->is_single_stack()) {
 103           return true;
 104         }
 105       }
 106 
 107       if (dst->is_register() &&
 108           ((src->is_register() && src->is_single_word() && src->is_same_type(dst)) ||
 109            (src->is_constant() && LIR_Assembler::is_small_constant(op->as_Op1()->in_opr())))) {
 110         return true;
 111       }
 112 
 113       return false;
 114     }
 115 
 116     default:
 117       return false;
 118   }
 119   ShouldNotReachHere();
 120 }
 121 
 122 
 123 LIR_Opr LIR_Assembler::receiverOpr() {
 124   return FrameMap::O0_oop_opr;
 125 }
 126 
 127 
 128 LIR_Opr LIR_Assembler::incomingReceiverOpr() {
 129   return FrameMap::I0_oop_opr;
 130 }
 131 
 132 
 133 LIR_Opr LIR_Assembler::osrBufferPointer() {
 134   return FrameMap::I0_opr;
 135 }
 136 
 137 
 138 int LIR_Assembler::initial_frame_size_in_bytes() {
 139   return in_bytes(frame_map()->framesize_in_bytes());
 140 }
 141 
 142 
 143 // inline cache check: the cached class is in G5_inline_cache_reg (G5);
 144 // we fetch the class of the receiver (O0) and compare it with the cached class.
 145 // If they do not match we jump to the slow case.
 146 int LIR_Assembler::check_icache() {
 147   int offset = __ offset();
 148   __ inline_cache_check(O0, G5_inline_cache_reg);
 149   return offset;
 150 }
 151 
 152 
 153 void LIR_Assembler::osr_entry() {
 154   // On-stack-replacement entry sequence (interpreter frame layout described in interpreter_sparc.cpp):
 155   //
 156   //   1. Create a new compiled activation.
 157   //   2. Initialize local variables in the compiled activation.  The expression stack must be empty
 158   //      at the osr_bci; it is not initialized.
 159   //   3. Jump to the continuation address in compiled code to resume execution.
 160 
 161   // OSR entry point
 162   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 163   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 164   ValueStack* entry_state = osr_entry->end()->state();
 165   int number_of_locks = entry_state->locks_size();
 166 
 167   // Create a frame for the compiled activation.
 168   __ build_frame(initial_frame_size_in_bytes());
 169 
 170   // OSR buffer is
 171   //
 172   // locals[nlocals-1..0]
 173   // monitors[number_of_locks-1..0]
 174   //
 175   // The locals are a direct copy of the interpreter frame, so in the osr buffer
 176   // the first slot in the locals array is the last local from the interpreter
 177   // and the last slot is local[0] (the receiver) from the interpreter.
 178   //
 179   // Similarly with locks: the first lock slot in the osr buffer is the nth lock
 180   // from the interpreter frame, and the nth lock slot in the osr buffer is the
 181   // 0th lock in the interpreter frame (the method lock if the method is synchronized).
 182 
 183   // Initialize monitors in the compiled activation.
 184   //   I0: pointer to osr buffer
 185   //
 186   // All other registers are dead at this point and the locals will be
 187   // copied into place by code emitted in the IR.
 188 
 189   Register OSR_buf = osrBufferPointer()->as_register();
 190   { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
 191     int monitor_offset = BytesPerWord * method()->max_locals() +
 192       (2 * BytesPerWord) * (number_of_locks - 1);
 193     // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
 194     // the OSR buffer using 2 word entries: first the lock and then
 195     // the oop.
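    // For illustration, with hypothetical sizes BytesPerWord == 8,
    // method()->max_locals() == 3 and number_of_locks == 2:
    //   monitor_offset = 8*3 + 16*1 = 40, so monitor 0 is copied from
    //   OSR_buf+40 (lock) and OSR_buf+48 (object), and monitor 1 from
    //   OSR_buf+24 and OSR_buf+32 -- i.e. monitor 0 sits last in the
    //   buffer, matching the reversed ordering described above.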
 196     for (int i = 0; i < number_of_locks; i++) {
 197       int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
 198 #ifdef ASSERT
 199       // verify the interpreter's monitor has a non-null object
 200       {
 201         Label L;
 202         __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
 203         __ cmp(G0, O7);
 204         __ br(Assembler::notEqual, false, Assembler::pt, L);
 205         __ delayed()->nop();
 206         __ stop("locked object is NULL");
 207         __ bind(L);
 208       }
 209 #endif // ASSERT
 210       // Copy the lock field into the compiled activation.
 211       __ ld_ptr(OSR_buf, slot_offset + 0, O7);
 212       __ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
 213       __ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
 214       __ st_ptr(O7, frame_map()->address_for_monitor_object(i));
 215     }
 216   }
 217 }
 218 
 219 
 220 // Optimized Library calls
 221 // This is the fast version of java.lang.String.compare; it has no
 222 // OSR entry and therefore we generate a slow version for OSRs.
 223 void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst, CodeEmitInfo* info) {
 224   Register str0 = left->as_register();
 225   Register str1 = right->as_register();
 226 
 227   Label Ldone;
 228 
 229   Register result = dst->as_register();
 230   {
 231     // Get a pointer to the first character of string0 in tmp0 and get string0.count in str0
 232     // Get a pointer to the first character of string1 in tmp1 and get string1.count in str1
 233     // Also, get string0.count-string1.count in O7 and set the condition codes
 234     // Note: some instructions have been hoisted for better instruction scheduling
 235 
 236     Register tmp0 = L0;
 237     Register tmp1 = L1;
 238     Register tmp2 = L2;
 239 
 240     int  value_offset = java_lang_String:: value_offset_in_bytes(); // char array
 241     int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
 242     int  count_offset = java_lang_String:: count_offset_in_bytes();
 243 
 244     __ ld_ptr(str0, value_offset, tmp0);
 245     __ ld(str0, offset_offset, tmp2);
 246     __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
 247     __ ld(str0, count_offset, str0);
 248     __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
 249 
 250     // str1 may be null
 251     add_debug_info_for_null_check_here(info);
 252 
 253     __ ld_ptr(str1, value_offset, tmp1);
 254     __ add(tmp0, tmp2, tmp0);
 255 
 256     __ ld(str1, offset_offset, tmp2);
 257     __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
 258     __ ld(str1, count_offset, str1);
 259     __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
 260     __ subcc(str0, str1, O7);
 261     __ add(tmp1, tmp2, tmp1);
 262   }
 263 
 264   {
 265     // Compute the minimum of the string lengths, scale it and store it in limit
 266     Register count0 = I0;
 267     Register count1 = I1;
 268     Register limit  = L3;
 269 
 270     Label Lskip;
 271     __ sll(count0, exact_log2(sizeof(jchar)), limit);             // string0 is shorter
 272     __ br(Assembler::greater, true, Assembler::pt, Lskip);
 273     __ delayed()->sll(count1, exact_log2(sizeof(jchar)), limit);  // string1 is shorter
 274     __ bind(Lskip);
 275 
 276     // If either string is empty (or both of them) the result is the difference in lengths
 277     __ cmp(limit, 0);
 278     __ br(Assembler::equal, true, Assembler::pn, Ldone);
 279     __ delayed()->mov(O7, result);  // result is difference in lengths
 280   }
 281 
 282   {
 283     // Neither string is empty
 284     Label Lloop;
 285 
 286     Register base0 = L0;
 287     Register base1 = L1;
 288     Register chr0  = I0;
 289     Register chr1  = I1;
 290     Register limit = L3;
 291 
 292     // Shift base0 and base1 to the end of the arrays, negate limit
 293     __ add(base0, limit, base0);
 294     __ add(base1, limit, base1);
 295     __ neg(limit);  // limit = -min{string0.count, string1.count}
 296 
 297     __ lduh(base0, limit, chr0);
 298     __ bind(Lloop);
 299     __ lduh(base1, limit, chr1);
 300     __ subcc(chr0, chr1, chr0);
 301     __ br(Assembler::notZero, false, Assembler::pn, Ldone);
 302     assert(chr0 == result, "result must be pre-placed");
 303     __ delayed()->inccc(limit, sizeof(jchar));
 304     __ br(Assembler::notZero, true, Assembler::pt, Lloop);
 305     __ delayed()->lduh(base0, limit, chr0);
 306   }
 307 
 308   // If strings are equal up to min length, return the length difference.
 309   __ mov(O7, result);
 310 
 311   // Otherwise, the loop above has already left the difference between the first mismatched chars in result.
 312   __ bind(Ldone);
 313 }
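
// For reference, a minimal C++-style sketch of the comparison the emitted code
// performs (field and helper names below are illustrative, not the actual
// runtime accessors):
//
//   int string_compare(jchar* value0, int offset0, int count0,
//                      jchar* value1, int offset1, int count1) {
//     jchar* p0 = value0 + offset0;
//     jchar* p1 = value1 + offset1;
//     int diff = count0 - count1;                    // kept in O7 above
//     int n    = count0 < count1 ? count0 : count1;
//     for (int i = 0; i < n; i++) {
//       if (p0[i] != p1[i]) return p0[i] - p1[i];    // first mismatched chars
//     }
//     return diff;                                   // equal up to min length
//   }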
 314 
 315 
 316 // --------------------------------------------------------------------------------------------
 317 
 318 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no) {
 319   if (!GenerateSynchronizationCode) return;
 320 
 321   Register obj_reg = obj_opr->as_register();
 322   Register lock_reg = lock_opr->as_register();
 323 
 324   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
 325   Register reg = mon_addr.base();
 326   int offset = mon_addr.disp();
 327   // compute pointer to BasicLock
 328   if (mon_addr.is_simm13()) {
 329     __ add(reg, offset, lock_reg);
 330   }
 331   else {
 332     __ set(offset, lock_reg);
 333     __ add(reg, lock_reg, lock_reg);
 334   }
 335   // unlock object
 336   MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, UseFastLocking, monitor_no);
 337   // _slow_case_stubs->append(slow_case);
 338   // temporary fix: must be created after the exception handler, therefore as a call stub
 339   _slow_case_stubs->append(slow_case);
 340   if (UseFastLocking) {
 341     // try inlined fast unlocking first, revert to slow unlocking if it fails
 342     // note: lock_reg points to the displaced header since the displaced header offset is 0!
 343     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
 344     __ unlock_object(hdr, obj_reg, lock_reg, *slow_case->entry());
 345   } else {
 346     // always do slow unlocking
 347     // note: the slow unlocking code could be inlined here, however if we use
 348     //       slow unlocking, speed doesn't matter anyway and this solution is
 349     //       simpler and requires less duplicated code - additionally, the
 350     //       slow unlocking code is the same in either case which simplifies
 351     //       debugging
 352     __ br(Assembler::always, false, Assembler::pt, *slow_case->entry());
 353     __ delayed()->nop();
 354   }
 355   // done
 356   __ bind(*slow_case->continuation());
 357 }
 358 
 359 
 360 int LIR_Assembler::emit_exception_handler() {
 361   // if the last instruction is a call (typically to do a throw which
 362   // is coming at the end after block reordering) the return address
 363   // must still point into the code area in order to avoid assertion
 364   // failures when searching for the corresponding bci => add a nop
 365   // (was bug 5/14/1999 - gri)
 366   __ nop();
 367 
 368   // generate code for exception handler
 369   ciMethod* method = compilation()->method();
 370 
 371   address handler_base = __ start_a_stub(exception_handler_size);
 372 
 373   if (handler_base == NULL) {
 374     // not enough space left for the handler
 375     bailout("exception handler overflow");
 376     return -1;
 377   }
 378 
 379   int offset = code_offset();
 380 
 381   __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
 382   __ delayed()->nop();
 383   debug_only(__ stop("should have gone to the caller");)
 384   assert(code_offset() - offset <= exception_handler_size, "overflow");
 385   __ end_a_stub();
 386 
 387   return offset;
 388 }
 389 
 390 
 391 int LIR_Assembler::emit_deopt_handler() {
 392   // if the last instruction is a call (typically to do a throw which
 393   // is coming at the end after block reordering) the return address
 394   // must still point into the code area in order to avoid assertion
 395   // failures when searching for the corresponding bci => add a nop
 396   // (was bug 5/14/1999 - gri)
 397   __ nop();
 398 
 399   // generate code for deopt handler
 400   ciMethod* method = compilation()->method();
 401   address handler_base = __ start_a_stub(deopt_handler_size);
 402   if (handler_base == NULL) {
 403     // not enough space left for the handler
 404     bailout("deopt handler overflow");
 405     return -1;
 406   }
 407 
 408   int offset = code_offset();
 409   AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
 410   __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
 411   __ delayed()->nop();
 412   assert(code_offset() - offset <= deopt_handler_size, "overflow");
 413   debug_only(__ stop("should have gone to the caller");)
 414   __ end_a_stub();
 415 
 416   return offset;
 417 }
 418 
 419 
 420 void LIR_Assembler::jobject2reg(jobject o, Register reg) {
 421   if (o == NULL) {
 422     __ set(NULL_WORD, reg);
 423   } else {
 424     int oop_index = __ oop_recorder()->find_index(o);
 425     RelocationHolder rspec = oop_Relocation::spec(oop_index);
 426     __ set(NULL_WORD, reg, rspec); // Will be set when the nmethod is created
 427   }
 428 }
 429 
 430 
 431 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
 432   // Allocate a new index in oop table to hold the oop once it's been patched
 433   int oop_index = __ oop_recorder()->allocate_index((jobject)NULL);
 434   PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, oop_index);
 435 
 436   AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
 437   assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
 438   // It may not seem necessary to use a sethi/add pair to load a NULL into dest, but the
 439   // NULL will be dynamically patched later and the patched value may be large.  We must
 440   // therefore generate the sethi/add as placeholders.
 441   __ patchable_set(addrlit, reg);
 442 
 443   patching_epilog(patch, lir_patch_normal, reg, info);
 444 }
 445 
 446 
 447 void LIR_Assembler::emit_op3(LIR_Op3* op) {
 448   Register Rdividend = op->in_opr1()->as_register();
 449   Register Rdivisor  = noreg;
 450   Register Rscratch  = op->in_opr3()->as_register();
 451   Register Rresult   = op->result_opr()->as_register();
 452   int divisor = -1;
 453 
 454   if (op->in_opr2()->is_register()) {
 455     Rdivisor = op->in_opr2()->as_register();
 456   } else {
 457     divisor = op->in_opr2()->as_constant_ptr()->as_jint();
 458     assert(Assembler::is_simm13(divisor), "can only handle simm13");
 459   }
 460 
 461   assert(Rdividend != Rscratch, "");
 462   assert(Rdivisor  != Rscratch, "");
 463   assert(op->code() == lir_idiv || op->code() == lir_irem, "Must be irem or idiv");
 464 
 465   if (Rdivisor == noreg && is_power_of_2(divisor)) {
 466     // convert division by a power of two into some shifts and logical operations
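    // For illustration, with divisor == 4 and Rdividend == -7 the idiv path
    // computes: bias = (-7 >> 31) & 3 = 3, then (-7 + 3) >> 2 = -1, the
    // round-toward-zero quotient (a plain arithmetic shift would give -2).
    // The irem path instead masks the biased value with ~(divisor - 1) to get
    // the truncated multiple (-4) and subtracts it: -7 - (-4) = -3.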
 467     if (op->code() == lir_idiv) {
 468       if (divisor == 2) {
 469         __ srl(Rdividend, 31, Rscratch);
 470       } else {
 471         __ sra(Rdividend, 31, Rscratch);
 472         __ and3(Rscratch, divisor - 1, Rscratch);
 473       }
 474       __ add(Rdividend, Rscratch, Rscratch);
 475       __ sra(Rscratch, log2_intptr(divisor), Rresult);
 476       return;
 477     } else {
 478       if (divisor == 2) {
 479         __ srl(Rdividend, 31, Rscratch);
 480       } else {
 481         __ sra(Rdividend, 31, Rscratch);
 482         __ and3(Rscratch, divisor - 1,Rscratch);
 483       }
 484       __ add(Rdividend, Rscratch, Rscratch);
 485       __ andn(Rscratch, divisor - 1,Rscratch);
 486       __ sub(Rdividend, Rscratch, Rresult);
 487       return;
 488     }
 489   }
 490 
 491   __ sra(Rdividend, 31, Rscratch);
 492   __ wry(Rscratch);
 493   if (!VM_Version::v9_instructions_work()) {
 494     // v9 doesn't require these nops
 495     __ nop();
 496     __ nop();
 497     __ nop();
 498     __ nop();
 499   }
 500 
 501   add_debug_info_for_div0_here(op->info());
 502 
 503   if (Rdivisor != noreg) {
 504     __ sdivcc(Rdividend, Rdivisor, (op->code() == lir_idiv ? Rresult : Rscratch));
 505   } else {
 506     assert(Assembler::is_simm13(divisor), "can only handle simm13");
 507     __ sdivcc(Rdividend, divisor, (op->code() == lir_idiv ? Rresult : Rscratch));
 508   }
 509 
 510   Label skip;
 511   __ br(Assembler::overflowSet, true, Assembler::pn, skip);
 512   __ delayed()->Assembler::sethi(0x80000000, (op->code() == lir_idiv ? Rresult : Rscratch));
 513   __ bind(skip);
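  // The annulled sethi above covers the only case in which sdivcc sets the
  // overflow flag: min_jint / -1.  Java semantics require that quotient to
  // wrap back to min_jint, which is exactly the 0x80000000 placed in the
  // result register by the delay-slot instruction.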
 514 
 515   if (op->code() == lir_irem) {
 516     if (Rdivisor != noreg) {
 517       __ smul(Rscratch, Rdivisor, Rscratch);
 518     } else {
 519       __ smul(Rscratch, divisor, Rscratch);
 520     }
 521     __ sub(Rdividend, Rscratch, Rresult);
 522   }
 523 }
 524 
 525 
 526 void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
 527 #ifdef ASSERT
 528   assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
 529   if (op->block() != NULL)  _branch_target_blocks.append(op->block());
 530   if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
 531 #endif
 532   assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
 533 
 534   if (op->cond() == lir_cond_always) {
 535     __ br(Assembler::always, false, Assembler::pt, *(op->label()));
 536   } else if (op->code() == lir_cond_float_branch) {
 537     assert(op->ublock() != NULL, "must have unordered successor");
 538     bool is_unordered = (op->ublock() == op->block());
 539     Assembler::Condition acond;
 540     switch (op->cond()) {
 541       case lir_cond_equal:         acond = Assembler::f_equal;    break;
 542       case lir_cond_notEqual:      acond = Assembler::f_notEqual; break;
 543       case lir_cond_less:          acond = (is_unordered ? Assembler::f_unorderedOrLess          : Assembler::f_less);           break;
 544       case lir_cond_greater:       acond = (is_unordered ? Assembler::f_unorderedOrGreater       : Assembler::f_greater);        break;
 545       case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
 546       case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
 547       default :                         ShouldNotReachHere();
 548     };
 549 
 550     if (!VM_Version::v9_instructions_work()) {
 551       __ nop();
 552     }
 553     __ fb( acond, false, Assembler::pn, *(op->label()));
 554   } else {
 555     assert (op->code() == lir_branch, "just checking");
 556 
 557     Assembler::Condition acond;
 558     switch (op->cond()) {
 559       case lir_cond_equal:        acond = Assembler::equal;                break;
 560       case lir_cond_notEqual:     acond = Assembler::notEqual;             break;
 561       case lir_cond_less:         acond = Assembler::less;                 break;
 562       case lir_cond_lessEqual:    acond = Assembler::lessEqual;            break;
 563       case lir_cond_greaterEqual: acond = Assembler::greaterEqual;         break;
 564       case lir_cond_greater:      acond = Assembler::greater;              break;
 565       case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned; break;
 566       case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;    break;
 567       default:                         ShouldNotReachHere();
 568     };
 569 
 570     // sparc has different condition codes for testing 32-bit
 571     // vs. 64-bit values.  We could always test xcc if we could
 572     // guarantee that 32-bit loads were always sign extended, but that
 573     // isn't true, and since sign extension isn't free, it would impose
 574     // a slight cost.
 575 #ifdef _LP64
 576     if  (op->type() == T_INT) {
 577       __ br(acond, false, Assembler::pn, *(op->label()));
 578     } else
 579 #endif
 580       __ brx(acond, false, Assembler::pn, *(op->label()));
 581   }
 582   // The peephole pass fills the delay slot
 583 }
 584 
 585 
 586 void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
 587   Bytecodes::Code code = op->bytecode();
 588   LIR_Opr dst = op->result_opr();
 589 
 590   switch(code) {
 591     case Bytecodes::_i2l: {
 592       Register rlo  = dst->as_register_lo();
 593       Register rhi  = dst->as_register_hi();
 594       Register rval = op->in_opr()->as_register();
 595 #ifdef _LP64
 596       __ sra(rval, 0, rlo);
 597 #else
 598       __ mov(rval, rlo);
 599       __ sra(rval, BitsPerInt-1, rhi);
 600 #endif
 601       break;
 602     }
 603     case Bytecodes::_i2d:
 604     case Bytecodes::_i2f: {
 605       bool is_double = (code == Bytecodes::_i2d);
 606       FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
 607       FloatRegisterImpl::Width w = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
 608       FloatRegister rsrc = op->in_opr()->as_float_reg();
 609       if (rsrc != rdst) {
 610         __ fmov(FloatRegisterImpl::S, rsrc, rdst);
 611       }
 612       __ fitof(w, rdst, rdst);
 613       break;
 614     }
 615     case Bytecodes::_f2i:{
 616       FloatRegister rsrc = op->in_opr()->as_float_reg();
 617       Address       addr = frame_map()->address_for_slot(dst->single_stack_ix());
 618       Label L;
 619       // result must be 0 if value is NaN; test by comparing value to itself
 620       __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
 621       if (!VM_Version::v9_instructions_work()) {
 622         __ nop();
 623       }
 624       __ fb(Assembler::f_unordered, true, Assembler::pn, L);
 625       __ delayed()->st(G0, addr); // annulled if rsrc is not NaN
 626       __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
 627       // move the integer result from the float register to the destination stack slot
 628       __ stf(FloatRegisterImpl::S, rsrc, addr.base(), addr.disp());
 629       __ bind (L);
 630       break;
 631     }
 632     case Bytecodes::_l2i: {
 633       Register rlo  = op->in_opr()->as_register_lo();
 634       Register rhi  = op->in_opr()->as_register_hi();
 635       Register rdst = dst->as_register();
 636 #ifdef _LP64
 637       __ sra(rlo, 0, rdst);
 638 #else
 639       __ mov(rlo, rdst);
 640 #endif
 641       break;
 642     }
 643     case Bytecodes::_d2f:
 644     case Bytecodes::_f2d: {
 645       bool is_double = (code == Bytecodes::_f2d);
 646       assert((!is_double && dst->is_single_fpu()) || (is_double && dst->is_double_fpu()), "check");
 647       LIR_Opr val = op->in_opr();
 648       FloatRegister rval = (code == Bytecodes::_d2f) ? val->as_double_reg() : val->as_float_reg();
 649       FloatRegister rdst = is_double ? dst->as_double_reg() : dst->as_float_reg();
 650       FloatRegisterImpl::Width vw = is_double ? FloatRegisterImpl::S : FloatRegisterImpl::D;
 651       FloatRegisterImpl::Width dw = is_double ? FloatRegisterImpl::D : FloatRegisterImpl::S;
 652       __ ftof(vw, dw, rval, rdst);
 653       break;
 654     }
 655     case Bytecodes::_i2s:
 656     case Bytecodes::_i2b: {
 657       Register rval = op->in_opr()->as_register();
 658       Register rdst = dst->as_register();
 659       int shift = (code == Bytecodes::_i2b) ? (BitsPerInt - T_BYTE_aelem_bytes * BitsPerByte) : (BitsPerInt - BitsPerShort);
 660       __ sll (rval, shift, rdst);
 661       __ sra (rdst, shift, rdst);
 662       break;
 663     }
 664     case Bytecodes::_i2c: {
 665       Register rval = op->in_opr()->as_register();
 666       Register rdst = dst->as_register();
 667       int shift = BitsPerInt - T_CHAR_aelem_bytes * BitsPerByte;
 668       __ sll (rval, shift, rdst);
 669       __ srl (rdst, shift, rdst);
 670       break;
 671     }
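    // For illustration, with rval == 0x12345680 the shift pairs yield:
    //   _i2b: sll 24 -> 0x80000000, sra 24 -> 0xffffff80  (-128, the sign-extended byte)
    //   _i2s: sll 16 -> 0x56800000, sra 16 -> 0x00005680  (sign-extended short)
    //   _i2c: sll 16 -> 0x56800000, srl 16 -> 0x00005680  (zero-extended char)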
 672 
 673     default: ShouldNotReachHere();
 674   }
 675 }
 676 
 677 
 678 void LIR_Assembler::align_call(LIR_Code) {
 679   // do nothing since all instructions are word aligned on sparc
 680 }
 681 
 682 
 683 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
 684   __ call(op->addr(), rtype);
 685   // the peephole pass fills the delay slot
 686 }
 687 
 688 
 689 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
 690   RelocationHolder rspec = virtual_call_Relocation::spec(pc());
 691   __ set_oop((jobject)Universe::non_oop_word(), G5_inline_cache_reg);
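  // Universe::non_oop_word() is a sentinel that can never be a real oop; it
  // leaves the inline cache in its unresolved state until the runtime patches
  // in the actual cached value.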
 692   __ relocate(rspec);
 693   __ call(op->addr(), relocInfo::none);
 694   // the peephole pass fills the delay slot
 695 }
 696 
 697 
 698 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
 699   add_debug_info_for_null_check_here(op->info());
 700   __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
 701   if (__ is_simm13(op->vtable_offset())) {
 702     __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
 703   } else {
 704     // This will generate 2 instructions
 705     __ set(op->vtable_offset(), G5_method);
 706     // ld_ptr, set_hi, set
 707     __ ld_ptr(G3_scratch, G5_method, G5_method);
 708   }
 709   __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
 710   __ callr(G3_scratch, G0);
 711   // the peephole pass fills the delay slot
 712 }
 713 
 714 
 715 void LIR_Assembler::preserve_SP(LIR_OpJavaCall* op) {
 716   Unimplemented();
 717 }
 718 
 719 
 720 void LIR_Assembler::restore_SP(LIR_OpJavaCall* op) {
 721   Unimplemented();
 722 }
 723 
 724 
 725 // load with 32-bit displacement
 726 int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
 727   int load_offset = code_offset();
 728   if (Assembler::is_simm13(disp)) {
 729     if (info != NULL) add_debug_info_for_null_check_here(info);
 730     switch(ld_type) {
 731       case T_BOOLEAN: // fall through
 732       case T_BYTE  : __ ldsb(s, disp, d); break;
 733       case T_CHAR  : __ lduh(s, disp, d); break;
 734       case T_SHORT : __ ldsh(s, disp, d); break;
 735       case T_INT   : __ ld(s, disp, d); break;
 736       case T_ADDRESS:// fall through
 737       case T_ARRAY : // fall through
 738       case T_OBJECT: __ ld_ptr(s, disp, d); break;
 739       default      : ShouldNotReachHere();
 740     }
 741   } else {
 742     __ set(disp, O7);
 743     if (info != NULL) add_debug_info_for_null_check_here(info);
 744     load_offset = code_offset();
 745     switch(ld_type) {
 746       case T_BOOLEAN: // fall through
 747       case T_BYTE  : __ ldsb(s, O7, d); break;
 748       case T_CHAR  : __ lduh(s, O7, d); break;
 749       case T_SHORT : __ ldsh(s, O7, d); break;
 750       case T_INT   : __ ld(s, O7, d); break;
 751       case T_ADDRESS:// fall through
 752       case T_ARRAY : // fall through
 753       case T_OBJECT: __ ld_ptr(s, O7, d); break;
 754       default      : ShouldNotReachHere();
 755     }
 756   }
 757   if (ld_type == T_ARRAY || ld_type == T_OBJECT) __ verify_oop(d);
 758   return load_offset;
 759 }
 760 
 761 
 762 // store with 32-bit displacement
 763 void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
 764   if (Assembler::is_simm13(offset)) {
 765     if (info != NULL)  add_debug_info_for_null_check_here(info);
 766     switch (type) {
 767       case T_BOOLEAN: // fall through
 768       case T_BYTE  : __ stb(value, base, offset); break;
 769       case T_CHAR  : __ sth(value, base, offset); break;
 770       case T_SHORT : __ sth(value, base, offset); break;
 771       case T_INT   : __ stw(value, base, offset); break;
 772       case T_ADDRESS:// fall through
 773       case T_ARRAY : // fall through
 774       case T_OBJECT: __ st_ptr(value, base, offset); break;
 775       default      : ShouldNotReachHere();
 776     }
 777   } else {
 778     __ set(offset, O7);
 779     if (info != NULL) add_debug_info_for_null_check_here(info);
 780     switch (type) {
 781       case T_BOOLEAN: // fall through
 782       case T_BYTE  : __ stb(value, base, O7); break;
 783       case T_CHAR  : __ sth(value, base, O7); break;
 784       case T_SHORT : __ sth(value, base, O7); break;
 785       case T_INT   : __ stw(value, base, O7); break;
 786       case T_ADDRESS:// fall through
 787       case T_ARRAY : //fall through
 788       case T_OBJECT: __ st_ptr(value, base, O7); break;
 789       default      : ShouldNotReachHere();
 790     }
 791   }
 792   // Note: Do the store before verification as the code might be patched!
 793   if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(value);
 794 }
 795 
 796 
 797 // load float with 32-bit displacement
 798 void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
 799   FloatRegisterImpl::Width w;
 800   switch(ld_type) {
 801     case T_FLOAT : w = FloatRegisterImpl::S; break;
 802     case T_DOUBLE: w = FloatRegisterImpl::D; break;
 803     default      : ShouldNotReachHere();
 804   }
 805 
 806   if (Assembler::is_simm13(disp)) {
 807     if (info != NULL) add_debug_info_for_null_check_here(info);
 808     if (disp % BytesPerLong != 0 && w == FloatRegisterImpl::D) {
 809       __ ldf(FloatRegisterImpl::S, s, disp + BytesPerWord, d->successor());
 810       __ ldf(FloatRegisterImpl::S, s, disp               , d);
 811     } else {
 812       __ ldf(w, s, disp, d);
 813     }
 814   } else {
 815     __ set(disp, O7);
 816     if (info != NULL) add_debug_info_for_null_check_here(info);
 817     __ ldf(w, s, O7, d);
 818   }
 819 }
 820 
 821 
 822 // store float with 32-bit displacement
 823 void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
 824   FloatRegisterImpl::Width w;
 825   switch(type) {
 826     case T_FLOAT : w = FloatRegisterImpl::S; break;
 827     case T_DOUBLE: w = FloatRegisterImpl::D; break;
 828     default      : ShouldNotReachHere();
 829   }
 830 
 831   if (Assembler::is_simm13(offset)) {
 832     if (info != NULL) add_debug_info_for_null_check_here(info);
 833     if (w == FloatRegisterImpl::D && offset % BytesPerLong != 0) {
 834       __ stf(FloatRegisterImpl::S, value->successor(), base, offset + BytesPerWord);
 835       __ stf(FloatRegisterImpl::S, value             , base, offset);
 836     } else {
 837       __ stf(w, value, base, offset);
 838     }
 839   } else {
 840     __ set(offset, O7);
 841     if (info != NULL) add_debug_info_for_null_check_here(info);
 842     __ stf(w, value, O7, base);
 843   }
 844 }
 845 
 846 
 847 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) {
 848   int store_offset;
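  // For longs the value may be stored as two 32-bit halves at offset and
  // offset + 4, so the simm13 range check below conservatively covers the
  // word at the higher displacement as well.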
 849   if (!Assembler::is_simm13(offset + (type == T_LONG ? wordSize : 0))) {
 850     assert(!unaligned, "can't handle this");
 851     // for offsets larger than a simm13 we setup the offset in O7
 852     __ set(offset, O7);
 853     store_offset = store(from_reg, base, O7, type);
 854   } else {
 855     if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
 856     store_offset = code_offset();
 857     switch (type) {
 858       case T_BOOLEAN: // fall through
 859       case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
 860       case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
 861       case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
 862       case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
 863       case T_LONG  :
 864 #ifdef _LP64
 865         if (unaligned || PatchALot) {
 866           __ srax(from_reg->as_register_lo(), 32, O7);
 867           __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
 868           __ stw(O7,                         base, offset + hi_word_offset_in_bytes);
 869         } else {
 870           __ stx(from_reg->as_register_lo(), base, offset);
 871         }
 872 #else
 873         assert(Assembler::is_simm13(offset + 4), "must be");
 874         __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
 875         __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
 876 #endif
 877         break;
 878       case T_ADDRESS:// fall through
 879       case T_ARRAY : // fall through
 880       case T_OBJECT: __ st_ptr(from_reg->as_register(), base, offset); break;
 881       case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
 882       case T_DOUBLE:
 883         {
 884           FloatRegister reg = from_reg->as_double_reg();
 885           // split unaligned stores
 886           if (unaligned || PatchALot) {
 887             assert(Assembler::is_simm13(offset + 4), "must be");
 888             __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
 889             __ stf(FloatRegisterImpl::S, reg,              base, offset);
 890           } else {
 891             __ stf(FloatRegisterImpl::D, reg, base, offset);
 892           }
 893           break;
 894         }
 895       default      : ShouldNotReachHere();
 896     }
 897   }
 898   return store_offset;
 899 }
 900 
 901 
 902 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type) {
 903   if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
 904   int store_offset = code_offset();
 905   switch (type) {
 906     case T_BOOLEAN: // fall through
 907     case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
 908     case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
 909     case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
 910     case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
 911     case T_LONG  :
 912 #ifdef _LP64
 913       __ stx(from_reg->as_register_lo(), base, disp);
 914 #else
 915       assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
 916       __ std(from_reg->as_register_hi(), base, disp);
 917 #endif
 918       break;
 919     case T_ADDRESS:// fall through
 920     case T_ARRAY : // fall through
 921     case T_OBJECT: __ st_ptr(from_reg->as_register(), base, disp); break;
 922     case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
 923     case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
 924     default      : ShouldNotReachHere();
 925   }
 926   return store_offset;
 927 }
 928 
 929 
 930 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned) {
 931   int load_offset;
 932   if (!Assembler::is_simm13(offset + (type == T_LONG ? wordSize : 0))) {
 933     assert(base != O7, "destroying register");
 934     assert(!unaligned, "can't handle this");
 935     // for offsets larger than a simm13 we setup the offset in O7
 936     __ set(offset, O7);
 937     load_offset = load(base, O7, to_reg, type);
 938   } else {
 939     load_offset = code_offset();
 940     switch(type) {
 941       case T_BOOLEAN: // fall through
 942       case T_BYTE  : __ ldsb(base, offset, to_reg->as_register()); break;
 943       case T_CHAR  : __ lduh(base, offset, to_reg->as_register()); break;
 944       case T_SHORT : __ ldsh(base, offset, to_reg->as_register()); break;
 945       case T_INT   : __ ld(base, offset, to_reg->as_register()); break;
 946       case T_LONG  :
 947         if (!unaligned) {
 948 #ifdef _LP64
 949           __ ldx(base, offset, to_reg->as_register_lo());
 950 #else
 951           assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
 952                  "must be sequential");
 953           __ ldd(base, offset, to_reg->as_register_hi());
 954 #endif
 955         } else {
 956 #ifdef _LP64
 957           assert(base != to_reg->as_register_lo(), "can't handle this");
 958           assert(O7 != to_reg->as_register_lo(), "can't handle this");
 959           __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
 960           __ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
 961           __ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
 962           __ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
 963 #else
 964           if (base == to_reg->as_register_lo()) {
 965             __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
 966             __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
 967           } else {
 968             __ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
 969             __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
 970           }
 971 #endif
 972         }
 973         break;
 974       case T_ADDRESS:// fall through
 975       case T_ARRAY : // fall through
 976       case T_OBJECT: __ ld_ptr(base, offset, to_reg->as_register()); break;
 977       case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
 978       case T_DOUBLE:
 979         {
 980           FloatRegister reg = to_reg->as_double_reg();
 981           // split unaligned loads
 982           if (unaligned || PatchALot) {
 983             __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
 984             __ ldf(FloatRegisterImpl::S, base, offset,     reg);
 985           } else {
 986             __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
 987           }
 988           break;
 989         }
 990       default      : ShouldNotReachHere();
 991     }
 992     if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
 993   }
 994   return load_offset;
 995 }
 996 
 997 
 998 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type) {
 999   int load_offset = code_offset();
1000   switch(type) {
1001     case T_BOOLEAN: // fall through
1002     case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
1003     case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
1004     case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
1005     case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
1006     case T_ADDRESS:// fall through
1007     case T_ARRAY : // fall through
1008     case T_OBJECT: __ ld_ptr(base, disp, to_reg->as_register()); break;
1009     case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
1010     case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
1011     case T_LONG  :
1012 #ifdef _LP64
1013       __ ldx(base, disp, to_reg->as_register_lo());
1014 #else
1015       assert(to_reg->as_register_hi()->successor() == to_reg->as_register_lo(),
1016              "must be sequential");
1017       __ ldd(base, disp, to_reg->as_register_hi());
1018 #endif
1019       break;
1020     default      : ShouldNotReachHere();
1021   }
1022   if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
1023   return load_offset;
1024 }
1025 
1026 
1027 // load/store with an Address
1028 void LIR_Assembler::load(const Address& a, Register d,  BasicType ld_type, CodeEmitInfo *info, int offset) {
1029   load(a.base(), a.disp() + offset, d, ld_type, info);
1030 }
1031 
1032 
1033 void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
1034   store(value, dest.base(), dest.disp() + offset, type, info);
1035 }
1036 
1037 
1038 // loadf/storef with an Address
1039 void LIR_Assembler::load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) {
1040   load(a.base(), a.disp() + offset, d, ld_type, info);
1041 }
1042 
1043 
1044 void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
1045   store(value, dest.base(), dest.disp() + offset, type, info);
1046 }
1047 
1048 
1049 // load/store with an Address
1050 void LIR_Assembler::load(LIR_Address* a, Register d,  BasicType ld_type, CodeEmitInfo *info) {
1051   load(as_Address(a), d, ld_type, info);
1052 }
1053 
1054 
1055 void LIR_Assembler::store(Register value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
1056   store(value, as_Address(dest), type, info);
1057 }
1058 
1059 
1060 // loadf/storef with an Address
1061 void LIR_Assembler::load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
1062   load(as_Address(a), d, ld_type, info);
1063 }
1064 
1065 
1066 void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
1067   store(value, as_Address(dest), type, info);
1068 }
1069 
1070 
1071 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
1072   LIR_Const* c = src->as_constant_ptr();
1073   switch (c->type()) {
1074     case T_INT:
1075     case T_FLOAT:
1076     case T_ADDRESS: {
1077       Register src_reg = O7;
1078       int value = c->as_jint_bits();
1079       if (value == 0) {
1080         src_reg = G0;
1081       } else {
1082         __ set(value, O7);
1083       }
1084       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1085       __ stw(src_reg, addr.base(), addr.disp());
1086       break;
1087     }
1088     case T_OBJECT: {
1089       Register src_reg = O7;
1090       jobject2reg(c->as_jobject(), src_reg);
1091       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
1092       __ st_ptr(src_reg, addr.base(), addr.disp());
1093       break;
1094     }
1095     case T_LONG:
1096     case T_DOUBLE: {
1097       Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
1098 
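      // The 64-bit constant is written as two 32-bit word stores: the low and
      // high halves from as_jint_lo_bits()/as_jint_hi_bits() go to
      // lo_word_offset_in_bytes and hi_word_offset_in_bytes respectively,
      // which encode the target's word ordering.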
1099       Register tmp = O7;
1100       int value_lo = c->as_jint_lo_bits();
1101       if (value_lo == 0) {
1102         tmp = G0;
1103       } else {
1104         __ set(value_lo, O7);
1105       }
1106       __ stw(tmp, addr.base(), addr.disp() + lo_word_offset_in_bytes);
1107       int value_hi = c->as_jint_hi_bits();
1108       if (value_hi == 0) {
1109         tmp = G0;
1110       } else {
1111         __ set(value_hi, O7);
1112       }
1113       __ stw(tmp, addr.base(), addr.disp() + hi_word_offset_in_bytes);
1114       break;
1115     }
1116     default:
1117       Unimplemented();
1118   }
1119 }
1120 
1121 
1122 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
1123   LIR_Const* c = src->as_constant_ptr();
1124   LIR_Address* addr     = dest->as_address_ptr();
1125   Register base = addr->base()->as_pointer_register();
1126 
1127   if (info != NULL) {
1128     add_debug_info_for_null_check_here(info);
1129   }
1130   switch (c->type()) {
1131     case T_INT:
1132     case T_FLOAT:
1133     case T_ADDRESS: {
1134       LIR_Opr tmp = FrameMap::O7_opr;
1135       int value = c->as_jint_bits();
1136       if (value == 0) {
1137         tmp = FrameMap::G0_opr;
1138       } else {
1139         __ set(value, O7);
1140       }
1141       if (addr->index()->is_valid()) {
1142         assert(addr->disp() == 0, "must be zero");
1143         store(tmp, base, addr->index()->as_pointer_register(), type);
1144       } else {
1145         assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1146         store(tmp, base, addr->disp(), type);
1147       }
1148       break;
1149     }
1150     case T_LONG:
1151     case T_DOUBLE: {
1152       assert(!addr->index()->is_valid(), "can't handle reg reg address here");
1153       assert(Assembler::is_simm13(addr->disp()) &&
1154              Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
1155 
1156       Register tmp = O7;
1157       int value_lo = c->as_jint_lo_bits();
1158       if (value_lo == 0) {
1159         tmp = G0;
1160       } else {
1161         __ set(value_lo, O7);
1162       }
1163       store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT);
1164       int value_hi = c->as_jint_hi_bits();
1165       if (value_hi == 0) {
1166         tmp = G0;
1167       } else {
1168         __ set(value_hi, O7);
1169       }
1170       store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT);
1171       break;
1172     }
1173     case T_OBJECT: {
1174       jobject obj = c->as_jobject();
1175       LIR_Opr tmp;
1176       if (obj == NULL) {
1177         tmp = FrameMap::G0_opr;
1178       } else {
1179         tmp = FrameMap::O7_opr;
1180         jobject2reg(c->as_jobject(), O7);
1181       }
1182       // handle either reg+reg or reg+disp address
1183       if (addr->index()->is_valid()) {
1184         assert(addr->disp() == 0, "must be zero");
1185         store(tmp, base, addr->index()->as_pointer_register(), type);
1186       } else {
1187         assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
1188         store(tmp, base, addr->disp(), type);
1189       }
1190 
1191       break;
1192     }
1193     default:
1194       Unimplemented();
1195   }
1196 }
1197 
1198 
1199 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
1200   LIR_Const* c = src->as_constant_ptr();
1201   LIR_Opr to_reg = dest;
1202 
1203   switch (c->type()) {
1204     case T_INT:
1205     case T_ADDRESS:
1206       {
1207         jint con = c->as_jint();
1208         if (to_reg->is_single_cpu()) {
1209           assert(patch_code == lir_patch_none, "no patching handled here");
1210           __ set(con, to_reg->as_register());
1211         } else {
1212           ShouldNotReachHere();
1213           assert(to_reg->is_single_fpu(), "wrong register kind");
1214 
1215           __ set(con, O7);
1216           Address temp_slot(SP, (frame::register_save_words * wordSize) + STACK_BIAS);
1217           __ st(O7, temp_slot);
1218           __ ldf(FloatRegisterImpl::S, temp_slot, to_reg->as_float_reg());
1219         }
1220       }
1221       break;
1222 
1223     case T_LONG:
1224       {
1225         jlong con = c->as_jlong();
1226 
1227         if (to_reg->is_double_cpu()) {
1228 #ifdef _LP64
1229           __ set(con,  to_reg->as_register_lo());
1230 #else
1231           __ set(low(con),  to_reg->as_register_lo());
1232           __ set(high(con), to_reg->as_register_hi());
1233 #endif
1234 #ifdef _LP64
1235         } else if (to_reg->is_single_cpu()) {
1236           __ set(con, to_reg->as_register());
1237 #endif
1238         } else {
1239           ShouldNotReachHere();
1240           assert(to_reg->is_double_fpu(), "wrong register kind");
1241           Address temp_slot_lo(SP, ((frame::register_save_words  ) * wordSize) + STACK_BIAS);
1242           Address temp_slot_hi(SP, ((frame::register_save_words) * wordSize) + (longSize/2) + STACK_BIAS);
1243           __ set(low(con),  O7);
1244           __ st(O7, temp_slot_lo);
1245           __ set(high(con), O7);
1246           __ st(O7, temp_slot_hi);
1247           __ ldf(FloatRegisterImpl::D, temp_slot_lo, to_reg->as_double_reg());
1248         }
1249       }
1250       break;
1251 
1252     case T_OBJECT:
1253       {
1254         if (patch_code == lir_patch_none) {
1255           jobject2reg(c->as_jobject(), to_reg->as_register());
1256         } else {
1257           jobject2reg_with_patching(to_reg->as_register(), info);
1258         }
1259       }
1260       break;
1261 
1262     case T_FLOAT:
1263       {
1264         address const_addr = __ float_constant(c->as_jfloat());
1265         if (const_addr == NULL) {
1266           bailout("const section overflow");
1267           break;
1268         }
1269         RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1270         AddressLiteral const_addrlit(const_addr, rspec);
1271         if (to_reg->is_single_fpu()) {
1272           __ patchable_sethi(const_addrlit, O7);
1273           __ relocate(rspec);
1274           __ ldf(FloatRegisterImpl::S, O7, const_addrlit.low10(), to_reg->as_float_reg());
1275 
1276         } else {
1277           assert(to_reg->is_single_cpu(), "Must be a cpu register.");
1278 
1279           __ set(const_addrlit, O7);
1280           load(O7, 0, to_reg->as_register(), T_INT);
1281         }
1282       }
1283       break;
1284 
1285     case T_DOUBLE:
1286       {
1287         address const_addr = __ double_constant(c->as_jdouble());
1288         if (const_addr == NULL) {
1289           bailout("const section overflow");
1290           break;
1291         }
1292         RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
1293 
1294         if (to_reg->is_double_fpu()) {
1295           AddressLiteral const_addrlit(const_addr, rspec);
1296           __ patchable_sethi(const_addrlit, O7);
1297           __ relocate(rspec);
1298           __ ldf (FloatRegisterImpl::D, O7, const_addrlit.low10(), to_reg->as_double_reg());
1299         } else {
1300           assert(to_reg->is_double_cpu(), "Must be a long register.");
1301 #ifdef _LP64
1302           __ set(jlong_cast(c->as_jdouble()), to_reg->as_register_lo());
1303 #else
1304           __ set(low(jlong_cast(c->as_jdouble())), to_reg->as_register_lo());
1305           __ set(high(jlong_cast(c->as_jdouble())), to_reg->as_register_hi());
1306 #endif
1307         }
1308 
1309       }
1310       break;
1311 
1312     default:
1313       ShouldNotReachHere();
1314   }
1315 }
1316 
1317 Address LIR_Assembler::as_Address(LIR_Address* addr) {
1318   Register reg = addr->base()->as_register();
1319   return Address(reg, addr->disp());
1320 }
1321 
1322 
1323 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1324   switch (type) {
1325     case T_INT:
1326     case T_FLOAT: {
1327       Register tmp = O7;
1328       Address from = frame_map()->address_for_slot(src->single_stack_ix());
1329       Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
1330       __ lduw(from.base(), from.disp(), tmp);
1331       __ stw(tmp, to.base(), to.disp());
1332       break;
1333     }
1334     case T_OBJECT: {
1335       Register tmp = O7;
1336       Address from = frame_map()->address_for_slot(src->single_stack_ix());
1337       Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
1338       __ ld_ptr(from.base(), from.disp(), tmp);
1339       __ st_ptr(tmp, to.base(), to.disp());
1340       break;
1341     }
1342     case T_LONG:
1343     case T_DOUBLE: {
1344       Register tmp = O7;
1345       Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
1346       Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
1347       __ lduw(from.base(), from.disp(), tmp);
1348       __ stw(tmp, to.base(), to.disp());
1349       __ lduw(from.base(), from.disp() + 4, tmp);
1350       __ stw(tmp, to.base(), to.disp() + 4);
1351       break;
1352     }
1353 
1354     default:
1355       ShouldNotReachHere();
1356   }
1357 }
1358 
1359 
1360 Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
1361   Address base = as_Address(addr);
1362   return Address(base.base(), base.disp() + hi_word_offset_in_bytes);
1363 }
1364 
1365 
1366 Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
1367   Address base = as_Address(addr);
1368   return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
1369 }
1370 
1371 
1372 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
1373                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) {
1374 
1375   LIR_Address* addr = src_opr->as_address_ptr();
1376   LIR_Opr to_reg = dest;
1377 
1378   Register src = addr->base()->as_pointer_register();
1379   Register disp_reg = noreg;
1380   int disp_value = addr->disp();
1381   bool needs_patching = (patch_code != lir_patch_none);
1382 
1383   if (addr->base()->type() == T_OBJECT) {
1384     __ verify_oop(src);
1385   }
1386 
1387   PatchingStub* patch = NULL;
1388   if (needs_patching) {
1389     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1390     assert(!to_reg->is_double_cpu() ||
1391            patch_code == lir_patch_none ||
1392            patch_code == lir_patch_normal, "patching doesn't match register");
1393   }
1394 
1395   if (addr->index()->is_illegal()) {
1396     if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1397       if (needs_patching) {
1398         __ patchable_set(0, O7);
1399       } else {
1400         __ set(disp_value, O7);
1401       }
1402       disp_reg = O7;
1403     }
1404   } else if (unaligned || PatchALot) {
1405     __ add(src, addr->index()->as_register(), O7);
1406     src = O7;
1407   } else {
1408     disp_reg = addr->index()->as_pointer_register();
1409     assert(disp_value == 0, "can't handle 3 operand addresses");
1410   }
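  // At this point the access is either (src + disp_value) with a displacement
  // expected to fit in a simm13 (checked by the assert below), or
  // (src + disp_reg); oversized displacements and unaligned indexed accesses
  // were folded into O7 above.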
1411 
1412   // remember the offset of the load.  The patching_epilog must be done
1413   // before the call to add_debug_info, otherwise the PcDescs don't get
1414   // entered in increasing order.
1415   int offset = code_offset();
1416 
1417   assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1418   if (disp_reg == noreg) {
1419     offset = load(src, disp_value, to_reg, type, unaligned);
1420   } else {
1421     assert(!unaligned, "can't handle this");
1422     offset = load(src, disp_reg, to_reg, type);
1423   }
1424 
1425   if (patch != NULL) {
1426     patching_epilog(patch, patch_code, src, info);
1427   }
1428 
1429   if (info != NULL) add_debug_info_for_null_check(offset, info);
1430 }
1431 
1432 
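     // Software prefetch for an anticipated read; the PREFETCH instruction only
     // exists on V9, so this is a no-op on older hardware (prefetchw below is
     // the write analogue).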
1433 void LIR_Assembler::prefetchr(LIR_Opr src) {
1434   LIR_Address* addr = src->as_address_ptr();
1435   Address from_addr = as_Address(addr);
1436 
1437   if (VM_Version::has_v9()) {
1438     __ prefetch(from_addr, Assembler::severalReads);
1439   }
1440 }
1441 
1442 
1443 void LIR_Assembler::prefetchw(LIR_Opr src) {
1444   LIR_Address* addr = src->as_address_ptr();
1445   Address from_addr = as_Address(addr);
1446 
1447   if (VM_Version::has_v9()) {
1448     __ prefetch(from_addr, Assembler::severalWritesAndPossiblyReads);
1449   }
1450 }
1451 
1452 
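     // Load a spill slot into a register.  A double-word slot whose biased
     // displacement is not 8-byte aligned cannot be accessed with a single
     // 8-byte load, which is what the 'unaligned' flag tells load().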
1453 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1454   Address addr;
1455   if (src->is_single_word()) {
1456     addr = frame_map()->address_for_slot(src->single_stack_ix());
1457   } else if (src->is_double_word())  {
1458     addr = frame_map()->address_for_double_slot(src->double_stack_ix());
1459   }
1460 
1461   bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
1462   load(addr.base(), addr.disp(), dest, dest->type(), unaligned);
1463 }
1464 
1465 
1466 void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
1467   Address addr;
1468   if (dest->is_single_word()) {
1469     addr = frame_map()->address_for_slot(dest->single_stack_ix());
1470   } else if (dest->is_double_word())  {
1471     addr = frame_map()->address_for_slot(dest->double_stack_ix());
1472   }
1473   bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
1474   store(from_reg, addr.base(), addr.disp(), from_reg->type(), unaligned);
1475 }
1476 
1477 
1478 void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
1479   if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
1480     if (from_reg->is_double_fpu()) {
1481       // double to double moves
1482       assert(to_reg->is_double_fpu(), "should match");
1483       __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
1484     } else {
1485       // float to float moves
1486       assert(to_reg->is_single_fpu(), "should match");
1487       __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
1488     }
1489   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1490     if (from_reg->is_double_cpu()) {
1491 #ifdef _LP64
1492       __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
1493 #else
1494       assert(to_reg->is_double_cpu() &&
1495              from_reg->as_register_hi() != to_reg->as_register_lo() &&
1496              from_reg->as_register_lo() != to_reg->as_register_hi(),
1497              "should both be long and not overlap");
1498       // long to long moves
1499       __ mov(from_reg->as_register_hi(), to_reg->as_register_hi());
1500       __ mov(from_reg->as_register_lo(), to_reg->as_register_lo());
1501 #endif
1502 #ifdef _LP64
1503     } else if (to_reg->is_double_cpu()) {
1504       // int to int moves
1505       __ mov(from_reg->as_register(), to_reg->as_register_lo());
1506 #endif
1507     } else {
1508       // int to int moves
1509       __ mov(from_reg->as_register(), to_reg->as_register());
1510     }
1511   } else {
1512     ShouldNotReachHere();
1513   }
1514   if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
1515     __ verify_oop(to_reg->as_register());
1516   }
1517 }
1518 
1519 
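     // Store from_reg to the address described by dest.  Mirrors mem2reg above:
     // handles field patching and displacements that don't fit in a simm13 via O7.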
1520 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1521                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1522                             bool unaligned) {
1523   LIR_Address* addr = dest->as_address_ptr();
1524 
1525   Register src = addr->base()->as_pointer_register();
1526   Register disp_reg = noreg;
1527   int disp_value = addr->disp();
1528   bool needs_patching = (patch_code != lir_patch_none);
1529 
1530   if (addr->base()->is_oop_register()) {
1531     __ verify_oop(src);
1532   }
1533 
1534   PatchingStub* patch = NULL;
1535   if (needs_patching) {
1536     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1537     assert(!from_reg->is_double_cpu() ||
1538            patch_code == lir_patch_none ||
1539            patch_code == lir_patch_normal, "patching doesn't match register");
1540   }
1541 
1542   if (addr->index()->is_illegal()) {
1543     if (!Assembler::is_simm13(disp_value) && (!unaligned || Assembler::is_simm13(disp_value + 4))) {
1544       if (needs_patching) {
1545         __ patchable_set(0, O7);
1546       } else {
1547         __ set(disp_value, O7);
1548       }
1549       disp_reg = O7;
1550     }
1551   } else if (unaligned || PatchALot) {
1552     __ add(src, addr->index()->as_register(), O7);
1553     src = O7;
1554   } else {
1555     disp_reg = addr->index()->as_pointer_register();
1556     assert(disp_value == 0, "can't handle 3 operand addresses");
1557   }
1558 
1559   // remember the offset of the store.  The patching_epilog must be done
1560   // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
1561   // entered in increasing order.
1562   int offset;
1563 
1564   assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
1565   if (disp_reg == noreg) {
1566     offset = store(from_reg, src, disp_value, type, unaligned);
1567   } else {
1568     assert(!unaligned, "can't handle this");
1569     offset = store(from_reg, src, disp_reg, type);
1570   }
1571 
1572   if (patch != NULL) {
1573     patching_epilog(patch, patch_code, src, info);
1574   }
1575 
1576   if (info != NULL) add_debug_info_for_null_check(offset, info);
1577 }
1578 
1579 
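     // Method return: touch the polling page so a safepoint can be taken at the
     // return, then return and restore the caller's register window in the delay
     // slot.  Under TIERED a long result is additionally packed into G1.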
1580 void LIR_Assembler::return_op(LIR_Opr result) {
1581   // the poll may need a register so just pick one that isn't the return register
1582 #ifdef TIERED
1583   if (result->type_field() == LIR_OprDesc::long_type) {
1584     // Must move the result to G1
1585     // Must leave proper result in O0,O1 and G1 (TIERED only)
1586     __ sllx(I0, 32, G1);          // Shift the high word (I0) into the upper half of G1
1587     __ srl (I1, 0, I1);           // Zero extend I1 (the caller's O1; harmless?)
1588     __ or3 (I1, G1, G1);          // OR the two halves into G1
1589   }
1590 #endif // TIERED
1591   __ set((intptr_t)os::get_polling_page(), L0);
1592   __ relocate(relocInfo::poll_return_type);
1593   __ ld_ptr(L0, 0, G0);
1594   __ ret();
1595   __ delayed()->restore();
1596 }
1597 
1598 
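     // Explicit safepoint poll: load from the polling page and report the code
     // offset of the load so debug info can be attached to it.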
1599 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
1600   __ set((intptr_t)os::get_polling_page(), tmp->as_register());
1601   if (info != NULL) {
1602     add_debug_info_for_branch(info);
1603   } else {
1604     __ relocate(relocInfo::poll_type);
1605   }
1606 
1607   int offset = __ offset();
1608   __ ld_ptr(tmp->as_register(), 0, G0);
1609 
1610   return offset;
1611 }
1612 
1613 
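     // Out-of-line stub for a static call site: loads a placeholder oop into G5
     // and jumps to a placeholder destination; both are rewritten when the call
     // is resolved.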
1614 void LIR_Assembler::emit_static_call_stub() {
1615   address call_pc = __ pc();
1616   address stub = __ start_a_stub(call_stub_size);
1617   if (stub == NULL) {
1618     bailout("static call stub overflow");
1619     return;
1620   }
1621 
1622   int start = __ offset();
1623   __ relocate(static_stub_Relocation::spec(call_pc));
1624 
1625   __ set_oop(NULL, G5);
1626   // must be set to -1 at code generation time
1627   AddressLiteral addrlit(-1);
1628   __ jump_to(addrlit, G3);
1629   __ delayed()->nop();
1630 
1631   assert(__ offset() - start <= call_stub_size, "stub too big");
1632   __ end_a_stub();
1633 }
1634 
1635 
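     // Emit a compare that only sets the integer or floating-point condition
     // codes; the conditional branch or cmove that consumes them is emitted
     // separately.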
1636 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1637   if (opr1->is_single_fpu()) {
1638     __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, opr1->as_float_reg(), opr2->as_float_reg());
1639   } else if (opr1->is_double_fpu()) {
1640     __ fcmp(FloatRegisterImpl::D, Assembler::fcc0, opr1->as_double_reg(), opr2->as_double_reg());
1641   } else if (opr1->is_single_cpu()) {
1642     if (opr2->is_constant()) {
1643       switch (opr2->as_constant_ptr()->type()) {
1644         case T_INT:
1645           { jint con = opr2->as_constant_ptr()->as_jint();
1646             if (Assembler::is_simm13(con)) {
1647               __ cmp(opr1->as_register(), con);
1648             } else {
1649               __ set(con, O7);
1650               __ cmp(opr1->as_register(), O7);
1651             }
1652           }
1653           break;
1654 
1655         case T_OBJECT:
1656           // there are only equal/notequal comparisons on objects
1657           { jobject con = opr2->as_constant_ptr()->as_jobject();
1658             if (con == NULL) {
1659               __ cmp(opr1->as_register(), 0);
1660             } else {
1661               jobject2reg(con, O7);
1662               __ cmp(opr1->as_register(), O7);
1663             }
1664           }
1665           break;
1666 
1667         default:
1668           ShouldNotReachHere();
1669           break;
1670       }
1671     } else {
1672       if (opr2->is_address()) {
1673         LIR_Address * addr = opr2->as_address_ptr();
1674         BasicType type = addr->type();
1675         if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1676         else                    __ ld(as_Address(addr), O7);
1677         __ cmp(opr1->as_register(), O7);
1678       } else {
1679         __ cmp(opr1->as_register(), opr2->as_register());
1680       }
1681     }
1682   } else if (opr1->is_double_cpu()) {
1683     Register xlo = opr1->as_register_lo();
1684     Register xhi = opr1->as_register_hi();
1685     if (opr2->is_constant() && opr2->as_jlong() == 0) {
1686       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles these cases");
1687 #ifdef _LP64
1688       __ orcc(xhi, G0, G0);
1689 #else
1690       __ orcc(xhi, xlo, G0);
1691 #endif
1692     } else if (opr2->is_register()) {
1693       Register ylo = opr2->as_register_lo();
1694       Register yhi = opr2->as_register_hi();
1695 #ifdef _LP64
1696       __ cmp(xlo, ylo);
1697 #else
1698       __ subcc(xlo, ylo, xlo);
1699       __ subccc(xhi, yhi, xhi);
1700       if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
1701         __ orcc(xhi, xlo, G0);
1702       }
1703 #endif
1704     } else {
1705       ShouldNotReachHere();
1706     }
1707   } else if (opr1->is_address()) {
1708     LIR_Address * addr = opr1->as_address_ptr();
1709     BasicType type = addr->type();
1710     assert (opr2->is_constant(), "Checking");
1711     if ( type == T_OBJECT ) __ ld_ptr(as_Address(addr), O7);
1712     else                    __ ld(as_Address(addr), O7);
1713     __ cmp(O7, opr2->as_constant_ptr()->as_jint());
1714   } else {
1715     ShouldNotReachHere();
1716   }
1717 }
1718 
1719 
1720 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op){
1721   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
1722     bool is_unordered_less = (code == lir_ucmp_fd2i);
1723     if (left->is_single_fpu()) {
1724       __ float_cmp(true, is_unordered_less ? -1 : 1, left->as_float_reg(), right->as_float_reg(), dst->as_register());
1725     } else if (left->is_double_fpu()) {
1726       __ float_cmp(false, is_unordered_less ? -1 : 1, left->as_double_reg(), right->as_double_reg(), dst->as_register());
1727     } else {
1728       ShouldNotReachHere();
1729     }
1730   } else if (code == lir_cmp_l2i) {
1731 #ifdef _LP64
1732     __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
1733 #else
1734     __ lcmp(left->as_register_hi(),  left->as_register_lo(),
1735             right->as_register_hi(), right->as_register_lo(),
1736             dst->as_register());
1737 #endif
1738   } else {
1739     ShouldNotReachHere();
1740   }
1741 }
1742 
1743 
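     // Conditional move: materialize opr1 into the result, branch past the opr2
     // load when the condition holds, otherwise fall through and overwrite the
     // result with opr2.  For int constants part of the constant load is
     // scheduled into the branch delay slot.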
1744 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
1745 
1746   Assembler::Condition acond;
1747   switch (condition) {
1748     case lir_cond_equal:        acond = Assembler::equal;        break;
1749     case lir_cond_notEqual:     acond = Assembler::notEqual;     break;
1750     case lir_cond_less:         acond = Assembler::less;         break;
1751     case lir_cond_lessEqual:    acond = Assembler::lessEqual;    break;
1752     case lir_cond_greaterEqual: acond = Assembler::greaterEqual; break;
1753     case lir_cond_greater:      acond = Assembler::greater;      break;
1754     case lir_cond_aboveEqual:   acond = Assembler::greaterEqualUnsigned;      break;
1755     case lir_cond_belowEqual:   acond = Assembler::lessEqualUnsigned;      break;
1756     default:                         ShouldNotReachHere();
1757   };
1758 
1759   if (opr1->is_constant() && opr1->type() == T_INT) {
1760     Register dest = result->as_register();
1761     // load up first part of constant before branch
1762     // and do the rest in the delay slot.
1763     if (!Assembler::is_simm13(opr1->as_jint())) {
1764       __ sethi(opr1->as_jint(), dest);
1765     }
1766   } else if (opr1->is_constant()) {
1767     const2reg(opr1, result, lir_patch_none, NULL);
1768   } else if (opr1->is_register()) {
1769     reg2reg(opr1, result);
1770   } else if (opr1->is_stack()) {
1771     stack2reg(opr1, result, result->type());
1772   } else {
1773     ShouldNotReachHere();
1774   }
1775   Label skip;
1776   __ br(acond, false, Assembler::pt, skip);
1777   if (opr1->is_constant() && opr1->type() == T_INT) {
1778     Register dest = result->as_register();
1779     if (Assembler::is_simm13(opr1->as_jint())) {
1780       __ delayed()->or3(G0, opr1->as_jint(), dest);
1781     } else {
1782       // the sethi has been done above, so just put in the low 10 bits
1783       __ delayed()->or3(dest, opr1->as_jint() & 0x3ff, dest);
1784     }
1785   } else {
1786     // can't do anything useful in the delay slot
1787     __ delayed()->nop();
1788   }
1789   if (opr2->is_constant()) {
1790     const2reg(opr2, result, lir_patch_none, NULL);
1791   } else if (opr2->is_register()) {
1792     reg2reg(opr2, result);
1793   } else if (opr2->is_stack()) {
1794     stack2reg(opr2, result, result->type());
1795   } else {
1796     ShouldNotReachHere();
1797   }
1798   __ bind(skip);
1799 }
1800 
1801 
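     // Two-operand arithmetic.  Either both operands are in registers (FPU, long
     // or int) or the right-hand side is a simm13 constant; the strictfp variants
     // fall through to the ordinary FP multiply/divide.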
1802 void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack) {
1803   assert(info == NULL, "unused on this code path");
1804   assert(left->is_register(), "wrong items state");
1805   assert(dest->is_register(), "wrong items state");
1806 
1807   if (right->is_register()) {
1808     if (dest->is_float_kind()) {
1809 
1810       FloatRegister lreg, rreg, res;
1811       FloatRegisterImpl::Width w;
1812       if (right->is_single_fpu()) {
1813         w = FloatRegisterImpl::S;
1814         lreg = left->as_float_reg();
1815         rreg = right->as_float_reg();
1816         res  = dest->as_float_reg();
1817       } else {
1818         w = FloatRegisterImpl::D;
1819         lreg = left->as_double_reg();
1820         rreg = right->as_double_reg();
1821         res  = dest->as_double_reg();
1822       }
1823 
1824       switch (code) {
1825         case lir_add: __ fadd(w, lreg, rreg, res); break;
1826         case lir_sub: __ fsub(w, lreg, rreg, res); break;
1827         case lir_mul: // fall through
1828         case lir_mul_strictfp: __ fmul(w, lreg, rreg, res); break;
1829         case lir_div: // fall through
1830         case lir_div_strictfp: __ fdiv(w, lreg, rreg, res); break;
1831         default: ShouldNotReachHere();
1832       }
1833 
1834     } else if (dest->is_double_cpu()) {
1835 #ifdef _LP64
1836       Register dst_lo = dest->as_register_lo();
1837       Register op1_lo = left->as_pointer_register();
1838       Register op2_lo = right->as_pointer_register();
1839 
1840       switch (code) {
1841         case lir_add:
1842           __ add(op1_lo, op2_lo, dst_lo);
1843           break;
1844 
1845         case lir_sub:
1846           __ sub(op1_lo, op2_lo, dst_lo);
1847           break;
1848 
1849         default: ShouldNotReachHere();
1850       }
1851 #else
1852       Register op1_lo = left->as_register_lo();
1853       Register op1_hi = left->as_register_hi();
1854       Register op2_lo = right->as_register_lo();
1855       Register op2_hi = right->as_register_hi();
1856       Register dst_lo = dest->as_register_lo();
1857       Register dst_hi = dest->as_register_hi();
1858 
1859       switch (code) {
1860         case lir_add:
1861           __ addcc(op1_lo, op2_lo, dst_lo);
1862           __ addc (op1_hi, op2_hi, dst_hi);
1863           break;
1864 
1865         case lir_sub:
1866           __ subcc(op1_lo, op2_lo, dst_lo);
1867           __ subc (op1_hi, op2_hi, dst_hi);
1868           break;
1869 
1870         default: ShouldNotReachHere();
1871       }
1872 #endif
1873     } else {
1874       assert (right->is_single_cpu(), "Just Checking");
1875 
1876       Register lreg = left->as_register();
1877       Register res  = dest->as_register();
1878       Register rreg = right->as_register();
1879       switch (code) {
1880         case lir_add:  __ add  (lreg, rreg, res); break;
1881         case lir_sub:  __ sub  (lreg, rreg, res); break;
1882         case lir_mul:  __ mult (lreg, rreg, res); break;
1883         default: ShouldNotReachHere();
1884       }
1885     }
1886   } else {
1887     assert (right->is_constant(), "must be constant");
1888 
1889     if (dest->is_single_cpu()) {
1890       Register lreg = left->as_register();
1891       Register res  = dest->as_register();
1892       int    simm13 = right->as_constant_ptr()->as_jint();
1893 
1894       switch (code) {
1895         case lir_add:  __ add  (lreg, simm13, res); break;
1896         case lir_sub:  __ sub  (lreg, simm13, res); break;
1897         case lir_mul:  __ mult (lreg, simm13, res); break;
1898         default: ShouldNotReachHere();
1899       }
1900     } else {
1901       Register lreg = left->as_pointer_register();
1902       Register res  = dest->as_register_lo();
1903       long con = right->as_constant_ptr()->as_jlong();
1904       assert(Assembler::is_simm13(con), "must be simm13");
1905 
1906       switch (code) {
1907         case lir_add:  __ add  (lreg, (int)con, res); break;
1908         case lir_sub:  __ sub  (lreg, (int)con, res); break;
1909         case lir_mul:  __ mult (lreg, (int)con, res); break;
1910         default: ShouldNotReachHere();
1911       }
1912     }
1913   }
1914 }
1915 
1916 
1917 void LIR_Assembler::fpop() {
1918   // do nothing
1919 }
1920 
1921 
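     // Math intrinsics.  For sin/cos/tan only the expected calling convention is
     // asserted here (result in F0/F1); the call itself is not emitted in this
     // method.  sqrt and abs map directly to single FP instructions.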
1922 void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) {
1923   switch (code) {
1924     case lir_sin:
1925     case lir_tan:
1926     case lir_cos: {
1927       assert(thread->is_valid(), "preserve the thread object for performance reasons");
1928       assert(dest->as_double_reg() == F0, "the result will be in f0/f1");
1929       break;
1930     }
1931     case lir_sqrt: {
1932       assert(!thread->is_valid(), "there is no need for a thread_reg for dsqrt");
1933       FloatRegister src_reg = value->as_double_reg();
1934       FloatRegister dst_reg = dest->as_double_reg();
1935       __ fsqrt(FloatRegisterImpl::D, src_reg, dst_reg);
1936       break;
1937     }
1938     case lir_abs: {
1939       assert(!thread->is_valid(), "there is no need for a thread_reg for fabs");
1940       FloatRegister src_reg = value->as_double_reg();
1941       FloatRegister dst_reg = dest->as_double_reg();
1942       __ fabs(FloatRegisterImpl::D, src_reg, dst_reg);
1943       break;
1944     }
1945     default: {
1946       ShouldNotReachHere();
1947       break;
1948     }
1949   }
1950 }
1951 
1952 
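     // Bitwise and/or/xor on int and long operands, with a simm13 fast path for
     // constant right-hand sides.  On 32-bit builds the long forms operate on the
     // high and low register halves separately.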
1953 void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) {
1954   if (right->is_constant()) {
1955     if (dest->is_single_cpu()) {
1956       int simm13 = right->as_constant_ptr()->as_jint();
1957       switch (code) {
1958         case lir_logic_and:   __ and3 (left->as_register(), simm13, dest->as_register()); break;
1959         case lir_logic_or:    __ or3  (left->as_register(), simm13, dest->as_register()); break;
1960         case lir_logic_xor:   __ xor3 (left->as_register(), simm13, dest->as_register()); break;
1961         default: ShouldNotReachHere();
1962       }
1963     } else {
1964       long c = right->as_constant_ptr()->as_jlong();
1965       assert(c == (int)c && Assembler::is_simm13(c), "out of range");
1966       int simm13 = (int)c;
1967       switch (code) {
1968         case lir_logic_and:
1969 #ifndef _LP64
1970           __ and3 (left->as_register_hi(), 0,      dest->as_register_hi());
1971 #endif
1972           __ and3 (left->as_register_lo(), simm13, dest->as_register_lo());
1973           break;
1974 
1975         case lir_logic_or:
1976 #ifndef _LP64
1977           __ or3 (left->as_register_hi(), 0,      dest->as_register_hi());
1978 #endif
1979           __ or3 (left->as_register_lo(), simm13, dest->as_register_lo());
1980           break;
1981 
1982         case lir_logic_xor:
1983 #ifndef _LP64
1984           __ xor3 (left->as_register_hi(), 0,      dest->as_register_hi());
1985 #endif
1986           __ xor3 (left->as_register_lo(), simm13, dest->as_register_lo());
1987           break;
1988 
1989         default: ShouldNotReachHere();
1990       }
1991     }
1992   } else {
1993     assert(right->is_register(), "right should be in register");
1994 
1995     if (dest->is_single_cpu()) {
1996       switch (code) {
1997         case lir_logic_and:   __ and3 (left->as_register(), right->as_register(), dest->as_register()); break;
1998         case lir_logic_or:    __ or3  (left->as_register(), right->as_register(), dest->as_register()); break;
1999         case lir_logic_xor:   __ xor3 (left->as_register(), right->as_register(), dest->as_register()); break;
2000         default: ShouldNotReachHere();
2001       }
2002     } else {
2003 #ifdef _LP64
2004       Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() :
2005                                                                         left->as_register_lo();
2006       Register r = (right->is_single_cpu() && right->is_oop_register()) ? right->as_register() :
2007                                                                           right->as_register_lo();
2008 
2009       switch (code) {
2010         case lir_logic_and: __ and3 (l, r, dest->as_register_lo()); break;
2011         case lir_logic_or:  __ or3  (l, r, dest->as_register_lo()); break;
2012         case lir_logic_xor: __ xor3 (l, r, dest->as_register_lo()); break;
2013         default: ShouldNotReachHere();
2014       }
2015 #else
2016       switch (code) {
2017         case lir_logic_and:
2018           __ and3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2019           __ and3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2020           break;
2021 
2022         case lir_logic_or:
2023           __ or3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2024           __ or3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2025           break;
2026 
2027         case lir_logic_xor:
2028           __ xor3 (left->as_register_hi(), right->as_register_hi(), dest->as_register_hi());
2029           __ xor3 (left->as_register_lo(), right->as_register_lo(), dest->as_register_lo());
2030           break;
2031 
2032         default: ShouldNotReachHere();
2033       }
2034 #endif
2035     }
2036   }
2037 }
2038 
2039 
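     // log2 of the array element size; used to scale an element index into a byte
     // offset when forming the arraycopy source and destination pointers.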
2040 int LIR_Assembler::shift_amount(BasicType t) {
2041   int elem_size = type2aelembytes(t);
2042   switch (elem_size) {
2043     case 1 : return 0;
2044     case 2 : return 1;
2045     case 4 : return 2;
2046     case 8 : return 3;
2047   }
2048   ShouldNotReachHere();
2049   return -1;
2050 }
2051 
2052 
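     // Throw or unwind an exception whose oop is already in Oexception.  For a
     // regular throw the issuing pc is materialized into Oissuing_pc and recorded
     // as debug info before calling into the runtime.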
2053 void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info, bool unwind) {
2054   assert(exceptionOop->as_register() == Oexception, "should match");
2055   assert(unwind || exceptionPC->as_register() == Oissuing_pc, "should match");
2056 
2057   info->add_register_oop(exceptionOop);
2058 
2059   if (unwind) {
2060     __ call(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type);
2061     __ delayed()->nop();
2062   } else {
2063     // reuse the debug info from the safepoint poll for the throw op itself
2064     address pc_for_athrow  = __ pc();
2065     int pc_for_athrow_offset = __ offset();
2066     RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow);
2067     __ set(pc_for_athrow, Oissuing_pc, rspec);
2068     add_call_info(pc_for_athrow_offset, info); // for exception handler
2069 
2070     __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type);
2071     __ delayed()->nop();
2072   }
2073 }
2074 
2075 
2076 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2077   Register src = op->src()->as_register();
2078   Register dst = op->dst()->as_register();
2079   Register src_pos = op->src_pos()->as_register();
2080   Register dst_pos = op->dst_pos()->as_register();
2081   Register length  = op->length()->as_register();
2082   Register tmp = op->tmp()->as_register();
2083   Register tmp2 = O7;
2084 
2085   int flags = op->flags();
2086   ciArrayKlass* default_type = op->expected_type();
2087   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2088   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2089 
2090   // set up the arraycopy stub information
2091   ArrayCopyStub* stub = op->stub();
2092 
2093   // Always use the stub if no type information is available.  It's OK if
2094   // the known type isn't loaded, since the code sanity-checks it
2095   // in debug mode and the type isn't required when we know the exact type.
2096   // Also check that the type is an array type.
2097   // We also, for now, always call the stub if the barrier set requires a
2098   // write_ref_pre barrier (which the stub does, but none of the optimized
2099   // cases currently do).
2100   if (op->expected_type() == NULL ||
2101       Universe::heap()->barrier_set()->has_write_ref_pre_barrier()) {
2102     __ mov(src,     O0);
2103     __ mov(src_pos, O1);
2104     __ mov(dst,     O2);
2105     __ mov(dst_pos, O3);
2106     __ mov(length,  O4);
2107     __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
2108 
2109     __ br_zero(Assembler::less, false, Assembler::pn, O0, *stub->entry());
2110     __ delayed()->nop();
2111     __ bind(*stub->continuation());
2112     return;
2113   }
2114 
2115   assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
2116 
2117   // make sure src and dst are non-null and load array length
2118   if (flags & LIR_OpArrayCopy::src_null_check) {
2119     __ tst(src);
2120     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2121     __ delayed()->nop();
2122   }
2123 
2124   if (flags & LIR_OpArrayCopy::dst_null_check) {
2125     __ tst(dst);
2126     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2127     __ delayed()->nop();
2128   }
2129 
2130   if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
2131     // test src_pos register
2132     __ tst(src_pos);
2133     __ br(Assembler::less, false, Assembler::pn, *stub->entry());
2134     __ delayed()->nop();
2135   }
2136 
2137   if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
2138     // test dst_pos register
2139     __ tst(dst_pos);
2140     __ br(Assembler::less, false, Assembler::pn, *stub->entry());
2141     __ delayed()->nop();
2142   }
2143 
2144   if (flags & LIR_OpArrayCopy::length_positive_check) {
2145     // make sure length isn't negative
2146     __ tst(length);
2147     __ br(Assembler::less, false, Assembler::pn, *stub->entry());
2148     __ delayed()->nop();
2149   }
2150 
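       // Range checks: branch to the slow path when pos + length exceeds the
       // corresponding array's length (unsigned carry-set compare).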
2151   if (flags & LIR_OpArrayCopy::src_range_check) {
2152     __ ld(src, arrayOopDesc::length_offset_in_bytes(), tmp2);
2153     __ add(length, src_pos, tmp);
2154     __ cmp(tmp2, tmp);
2155     __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2156     __ delayed()->nop();
2157   }
2158 
2159   if (flags & LIR_OpArrayCopy::dst_range_check) {
2160     __ ld(dst, arrayOopDesc::length_offset_in_bytes(), tmp2);
2161     __ add(length, dst_pos, tmp);
2162     __ cmp(tmp2, tmp);
2163     __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
2164     __ delayed()->nop();
2165   }
2166 
2167   if (flags & LIR_OpArrayCopy::type_check) {
2168     __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
2169     __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2170     __ cmp(tmp, tmp2);
2171     __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2172     __ delayed()->nop();
2173   }
2174 
2175 #ifdef ASSERT
2176   if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
2177     // Sanity check the known type with the incoming class.  For the
2178     // primitive case the types must match exactly with src.klass and
2179     // dst.klass each exactly matching the default type.  For the
2180     // object array case, if no type check is needed then either the
2181     // dst type is exactly the expected type and the src type is a
2182     // subtype which we can't check or src is the same array as dst
2183     // but not necessarily exactly of type default_type.
2184     Label known_ok, halt;
2185     jobject2reg(op->expected_type()->constant_encoding(), tmp);
2186     __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
2187     if (basic_type != T_OBJECT) {
2188       __ cmp(tmp, tmp2);
2189       __ br(Assembler::notEqual, false, Assembler::pn, halt);
2190       __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
2191       __ cmp(tmp, tmp2);
2192       __ br(Assembler::equal, false, Assembler::pn, known_ok);
2193       __ delayed()->nop();
2194     } else {
2195       __ cmp(tmp, tmp2);
2196       __ br(Assembler::equal, false, Assembler::pn, known_ok);
2197       __ delayed()->cmp(src, dst);
2198       __ br(Assembler::equal, false, Assembler::pn, known_ok);
2199       __ delayed()->nop();
2200     }
2201     __ bind(halt);
2202     __ stop("incorrect type information in arraycopy");
2203     __ bind(known_ok);
2204   }
2205 #endif
2206 
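       // Compute the raw source and destination element pointers: the base offset
       // of the array data plus the element index scaled by the element size.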
2207   int shift = shift_amount(basic_type);
2208 
2209   Register src_ptr = O0;
2210   Register dst_ptr = O1;
2211   Register len     = O2;
2212 
2213   __ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
2214   LP64_ONLY(__ sra(src_pos, 0, src_pos);) // the upper 32 bits must be zero
2215   if (shift == 0) {
2216     __ add(src_ptr, src_pos, src_ptr);
2217   } else {
2218     __ sll(src_pos, shift, tmp);
2219     __ add(src_ptr, tmp, src_ptr);
2220   }
2221 
2222   __ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
2223   LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) // the upper 32 bits must be zero
2224   if (shift == 0) {
2225     __ add(dst_ptr, dst_pos, dst_ptr);
2226   } else {
2227     __ sll(dst_pos, shift, tmp);
2228     __ add(dst_ptr, tmp, dst_ptr);
2229   }
2230 
2231   if (basic_type != T_OBJECT) {
2232     if (shift == 0) {
2233       __ mov(length, len);
2234     } else {
2235       __ sll(length, shift, len);
2236     }
2237     __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::primitive_arraycopy));
2238   } else {
2239     // oop_arraycopy takes a length in number of elements, so don't scale it.
2240     __ mov(length, len);
2241     __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy));
2242   }
2243 
2244   __ bind(*stub->continuation());
2245 }
2246 
2247 
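     // Shift by a count held in a register.  SPARC only uses the low 5 bits
     // (32-bit forms) or low 6 bits (64-bit forms) of the count register, so no
     // explicit masking is needed here.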
2248 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2249   if (dest->is_single_cpu()) {
2250 #ifdef _LP64
2251     if (left->type() == T_OBJECT) {
2252       switch (code) {
2253         case lir_shl:  __ sllx  (left->as_register(), count->as_register(), dest->as_register()); break;
2254         case lir_shr:  __ srax  (left->as_register(), count->as_register(), dest->as_register()); break;
2255         case lir_ushr: __ srl   (left->as_register(), count->as_register(), dest->as_register()); break;
2256         default: ShouldNotReachHere();
2257       }
2258     } else
2259 #endif
2260       switch (code) {
2261         case lir_shl:  __ sll   (left->as_register(), count->as_register(), dest->as_register()); break;
2262         case lir_shr:  __ sra   (left->as_register(), count->as_register(), dest->as_register()); break;
2263         case lir_ushr: __ srl   (left->as_register(), count->as_register(), dest->as_register()); break;
2264         default: ShouldNotReachHere();
2265       }
2266   } else {
2267 #ifdef _LP64
2268     switch (code) {
2269       case lir_shl:  __ sllx  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2270       case lir_shr:  __ srax  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2271       case lir_ushr: __ srlx  (left->as_register_lo(), count->as_register(), dest->as_register_lo()); break;
2272       default: ShouldNotReachHere();
2273     }
2274 #else
2275     switch (code) {
2276       case lir_shl:  __ lshl  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2277       case lir_shr:  __ lshr  (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2278       case lir_ushr: __ lushr (left->as_register_hi(), left->as_register_lo(), count->as_register(), dest->as_register_hi(), dest->as_register_lo(), G3_scratch); break;
2279       default: ShouldNotReachHere();
2280     }
2281 #endif
2282   }
2283 }
2284 
2285 
2286 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2287 #ifdef _LP64
2288   if (left->type() == T_OBJECT) {
2289     count = count & 63;  // can't shift by more than the bit width of an intptr_t
2290     Register l = left->as_register();
2291     Register d = dest->as_register_lo();
2292     switch (code) {
2293       case lir_shl:  __ sllx  (l, count, d); break;
2294       case lir_shr:  __ srax  (l, count, d); break;
2295       case lir_ushr: __ srlx  (l, count, d); break;
2296       default: ShouldNotReachHere();
2297     }
2298     return;
2299   }
2300 #endif
2301 
2302   if (dest->is_single_cpu()) {
2303     count = count & 0x1F; // Java spec
2304     switch (code) {
2305       case lir_shl:  __ sll   (left->as_register(), count, dest->as_register()); break;
2306       case lir_shr:  __ sra   (left->as_register(), count, dest->as_register()); break;
2307       case lir_ushr: __ srl   (left->as_register(), count, dest->as_register()); break;
2308       default: ShouldNotReachHere();
2309     }
2310   } else if (dest->is_double_cpu()) {
2311     count = count & 63; // Java spec
2312     switch (code) {
2313       case lir_shl:  __ sllx  (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2314       case lir_shr:  __ srax  (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2315       case lir_ushr: __ srlx  (left->as_pointer_register(), count, dest->as_pointer_register()); break;
2316       default: ShouldNotReachHere();
2317     }
2318   } else {
2319     ShouldNotReachHere();
2320   }
2321 }
2322 
2323 
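     // Allocate an instance.  If the klass may not be initialized yet, its
     // init_state is checked first and the slow-path stub taken unless it is
     // fully_initialized; the fast path inlines the allocation via
     // allocate_object(), which branches to the stub on failure.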
2324 void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
2325   assert(op->tmp1()->as_register()  == G1 &&
2326          op->tmp2()->as_register()  == G3 &&
2327          op->tmp3()->as_register()  == G4 &&
2328          op->obj()->as_register()   == O0 &&
2329          op->klass()->as_register() == G5, "must be");
2330   if (op->init_check()) {
2331     __ ld(op->klass()->as_register(),
2332           instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc),
2333           op->tmp1()->as_register());
2334     add_debug_info_for_null_check_here(op->stub()->info());
2335     __ cmp(op->tmp1()->as_register(), instanceKlass::fully_initialized);
2336     __ br(Assembler::notEqual, false, Assembler::pn, *op->stub()->entry());
2337     __ delayed()->nop();
2338   }
2339   __ allocate_object(op->obj()->as_register(),
2340                      op->tmp1()->as_register(),
2341                      op->tmp2()->as_register(),
2342                      op->tmp3()->as_register(),
2343                      op->header_size(),
2344                      op->object_size(),
2345                      op->klass()->as_register(),
2346                      *op->stub()->entry());
2347   __ bind(*op->stub()->continuation());
2348   __ verify_oop(op->obj()->as_register());
2349 }
2350 
2351 
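     // Allocate an array.  The runtime stub is used directly when fast allocation
     // is disabled for the element type; otherwise allocate_array() inlines the
     // allocation and branches to the stub on failure.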
2352 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2353   assert(op->tmp1()->as_register()  == G1 &&
2354          op->tmp2()->as_register()  == G3 &&
2355          op->tmp3()->as_register()  == G4 &&
2356          op->tmp4()->as_register()  == O1 &&
2357          op->klass()->as_register() == G5, "must be");
2358   if (UseSlowPath ||
2359       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
2360       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
2361     __ br(Assembler::always, false, Assembler::pn, *op->stub()->entry());
2362     __ delayed()->nop();
2363   } else {
2364     __ allocate_array(op->obj()->as_register(),
2365                       op->len()->as_register(),
2366                       op->tmp1()->as_register(),
2367                       op->tmp2()->as_register(),
2368                       op->tmp3()->as_register(),
2369                       arrayOopDesc::header_size(op->type()),
2370                       type2aelembytes(op->type()),
2371                       op->klass()->as_register(),
2372                       *op->stub()->entry());
2373   }
2374   __ bind(*op->stub()->continuation());
2375 }
2376 
2377 
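     // Type checks: array store check, checkcast and instanceof.  Each
     // short-circuits a null object, then runs the inlined fast subtype check,
     // and only calls the slow_subtype_check runtime stub when the fast path
     // cannot decide.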
2378 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2379   LIR_Code code = op->code();
2380   if (code == lir_store_check) {
2381     Register value = op->object()->as_register();
2382     Register array = op->array()->as_register();
2383     Register k_RInfo = op->tmp1()->as_register();
2384     Register klass_RInfo = op->tmp2()->as_register();
2385     Register Rtmp1 = op->tmp3()->as_register();
2386 
2387     __ verify_oop(value);
2388 
2389     CodeStub* stub = op->stub();
2390     Label done;
2391     __ cmp(value, 0);
2392     __ br(Assembler::equal, false, Assembler::pn, done);
2393     __ delayed()->nop();
2394     load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
2395     load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
2396 
2397     // get instance klass
2398     load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
2399     // perform the fast part of the checking logic
2400     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, &done, stub->entry(), NULL);
2401 
2402     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2403     assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2404     __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2405     __ delayed()->nop();
2406     __ cmp(G3, 0);
2407     __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2408     __ delayed()->nop();
2409     __ bind(done);
2410   } else if (op->code() == lir_checkcast) {
2411     // we always need a stub for the failure case.
2412     CodeStub* stub = op->stub();
2413     Register obj = op->object()->as_register();
2414     Register k_RInfo = op->tmp1()->as_register();
2415     Register klass_RInfo = op->tmp2()->as_register();
2416     Register dst = op->result_opr()->as_register();
2417     Register Rtmp1 = op->tmp3()->as_register();
2418     ciKlass* k = op->klass();
2419 
2420     if (obj == k_RInfo) {
2421       k_RInfo = klass_RInfo;
2422       klass_RInfo = obj;
2423     }
2424     if (op->profiled_method() != NULL) {
2425       ciMethod* method = op->profiled_method();
2426       int bci          = op->profiled_bci();
2427 
2428       // We need two temporaries to perform this operation on SPARC,
2429       // so to keep things simple we perform a redundant test here
2430       Label profile_done;
2431       __ cmp(obj, 0);
2432       __ br(Assembler::notEqual, false, Assembler::pn, profile_done);
2433       __ delayed()->nop();
2434       // Object is null; update methodDataOop
2435       ciMethodData* md = method->method_data();
2436       if (md == NULL) {
2437         bailout("out of memory building methodDataOop");
2438         return;
2439       }
2440       ciProfileData* data = md->bci_to_data(bci);
2441       assert(data != NULL,       "need data for checkcast");
2442       assert(data->is_BitData(), "need BitData for checkcast");
2443       Register mdo      = k_RInfo;
2444       Register data_val = Rtmp1;
2445       jobject2reg(md->constant_encoding(), mdo);
2446 
2447       int mdo_offset_bias = 0;
2448       if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
2449         // The offset is large so bias the mdo by the base of the slot so
2450         // that the ld can use simm13s to reference the slots of the data
2451         mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
2452         __ set(mdo_offset_bias, data_val);
2453         __ add(mdo, data_val, mdo);
2454       }
2455 
2456 
2457       Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
2458       __ ldub(flags_addr, data_val);
2459       __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
2460       __ stb(data_val, flags_addr);
2461       __ bind(profile_done);
2462     }
2463 
2464     Label done;
2465     // patching may screw with our temporaries on sparc,
2466     // so let's do it before loading the class
2467     if (k->is_loaded()) {
2468       jobject2reg(k->constant_encoding(), k_RInfo);
2469     } else {
2470       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
2471     }
2472     assert(obj != k_RInfo, "must be different");
2473     __ cmp(obj, 0);
2474     __ br(Assembler::equal, false, Assembler::pn, done);
2475     __ delayed()->nop();
2476 
2477     // get object class
2478     // not a safepoint as obj null check happens earlier
2479     load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
2480     if (op->fast_check()) {
2481       assert_different_registers(klass_RInfo, k_RInfo);
2482       __ cmp(k_RInfo, klass_RInfo);
2483       __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
2484       __ delayed()->nop();
2485       __ bind(done);
2486     } else {
2487       bool need_slow_path = true;
2488       if (k->is_loaded()) {
2489         if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
2490           need_slow_path = false;
2491         // perform the fast part of the checking logic
2492         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
2493                                          (need_slow_path ? &done : NULL),
2494                                          stub->entry(), NULL,
2495                                          RegisterOrConstant(k->super_check_offset()));
2496       } else {
2497         // perform the fast part of the checking logic
2498         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7,
2499                                          &done, stub->entry(), NULL);
2500       }
2501       if (need_slow_path) {
2502         // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2503         assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2504         __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2505         __ delayed()->nop();
2506         __ cmp(G3, 0);
2507         __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
2508         __ delayed()->nop();
2509       }
2510       __ bind(done);
2511     }
2512     __ mov(obj, dst);
2513   } else if (code == lir_instanceof) {
2514     Register obj = op->object()->as_register();
2515     Register k_RInfo = op->tmp1()->as_register();
2516     Register klass_RInfo = op->tmp2()->as_register();
2517     Register dst = op->result_opr()->as_register();
2518     Register Rtmp1 = op->tmp3()->as_register();
2519     ciKlass* k = op->klass();
2520 
2521     Label done;
2522     if (obj == k_RInfo) {
2523       k_RInfo = klass_RInfo;
2524       klass_RInfo = obj;
2525     }
2526     // patching may screw with our temporaries on sparc,
2527     // so let's do it before loading the class
2528     if (k->is_loaded()) {
2529       jobject2reg(k->constant_encoding(), k_RInfo);
2530     } else {
2531       jobject2reg_with_patching(k_RInfo, op->info_for_patch());
2532     }
2533     assert(obj != k_RInfo, "must be different");
2534     __ cmp(obj, 0);
2535     __ br(Assembler::equal, true, Assembler::pn, done);
2536     __ delayed()->set(0, dst);
2537 
2538     // get object class
2539     // not a safepoint as obj null check happens earlier
2540     load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
2541     if (op->fast_check()) {
2542       __ cmp(k_RInfo, klass_RInfo);
2543       __ br(Assembler::equal, true, Assembler::pt, done);
2544       __ delayed()->set(1, dst);
2545       __ set(0, dst);
2546       __ bind(done);
2547     } else {
2548       bool need_slow_path = true;
2549       if (k->is_loaded()) {
2550         if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
2551           need_slow_path = false;
2552         // perform the fast part of the checking logic
2553         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, noreg,
2554                                          (need_slow_path ? &done : NULL),
2555                                          (need_slow_path ? &done : NULL), NULL,
2556                                          RegisterOrConstant(k->super_check_offset()),
2557                                          dst);
2558       } else {
2559         assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
2560         // perform the fast part of the checking logic
2561         __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, dst,
2562                                          &done, &done, NULL,
2563                                          RegisterOrConstant(-1),
2564                                          dst);
2565       }
2566       if (need_slow_path) {
2567         // call out-of-line instance of __ check_klass_subtype_slow_path(...):
2568         assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
2569         __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
2570         __ delayed()->nop();
2571         __ mov(G3, dst);
2572       }
2573       __ bind(done);
2574     }
2575   } else {
2576     ShouldNotReachHere();
2577   }
2578 
2579 }
2580 
2581 
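     // Compare-and-swap.  Long CAS always uses the 64-bit casx (on 32-bit builds
     // the register halves are first packed into single 64-bit registers); int
     // and 32-bit object CAS use cas, object CAS on LP64 uses casx.  The caller
     // branches on the condition codes set by the final cmp.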
2582 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
2583   if (op->code() == lir_cas_long) {
2584     assert(VM_Version::supports_cx8(), "wrong machine");
2585     Register addr = op->addr()->as_pointer_register();
2586     Register cmp_value_lo = op->cmp_value()->as_register_lo();
2587     Register cmp_value_hi = op->cmp_value()->as_register_hi();
2588     Register new_value_lo = op->new_value()->as_register_lo();
2589     Register new_value_hi = op->new_value()->as_register_hi();
2590     Register t1 = op->tmp1()->as_register();
2591     Register t2 = op->tmp2()->as_register();
2592 #ifdef _LP64
2593     __ mov(cmp_value_lo, t1);
2594     __ mov(new_value_lo, t2);
2595 #else
2596     // move high and low halves of long values into single registers
2597     __ sllx(cmp_value_hi, 32, t1);         // shift high half into temp reg
2598     __ srl(cmp_value_lo, 0, cmp_value_lo); // clear upper 32 bits of low half
2599     __ or3(t1, cmp_value_lo, t1);          // t1 holds 64-bit compare value
2600     __ sllx(new_value_hi, 32, t2);
2601     __ srl(new_value_lo, 0, new_value_lo);
2602     __ or3(t2, new_value_lo, t2);          // t2 holds 64-bit value to swap
2603 #endif
2604     // perform the compare and swap operation
2605     __ casx(addr, t1, t2);
2606     // generate condition code - if the swap succeeded, t2 ("new value" reg) was
2607     // overwritten with the original value in "addr" and will be equal to t1.
2608     __ cmp(t1, t2);
2609 
2610   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
2611     Register addr = op->addr()->as_pointer_register();
2612     Register cmp_value = op->cmp_value()->as_register();
2613     Register new_value = op->new_value()->as_register();
2614     Register t1 = op->tmp1()->as_register();
2615     Register t2 = op->tmp2()->as_register();
2616     __ mov(cmp_value, t1);
2617     __ mov(new_value, t2);
2618 #ifdef _LP64
2619     if (op->code() == lir_cas_obj) {
2620       __ casx(addr, t1, t2);
2621     } else
2622 #endif
2623       {
2624         __ cas(addr, t1, t2);
2625       }
2626     __ cmp(t1, t2);
2627   } else {
2628     Unimplemented();
2629   }
2630 }
2631 
2632 void LIR_Assembler::set_24bit_FPU() {
2633   Unimplemented();
2634 }
2635 
2636 
2637 void LIR_Assembler::reset_FPU() {
2638   Unimplemented();
2639 }
2640 
2641 
2642 void LIR_Assembler::breakpoint() {
2643   __ breakpoint_trap();
2644 }
2645 
2646 
2647 void LIR_Assembler::push(LIR_Opr opr) {
2648   Unimplemented();
2649 }
2650 
2651 
2652 void LIR_Assembler::pop(LIR_Opr opr) {
2653   Unimplemented();
2654 }
2655 
2656 
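     // Compute the in-frame address of the BasicLock for the given monitor slot
     // and leave it in the destination register.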
2657 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
2658   Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
2659   Register dst = dst_opr->as_register();
2660   Register reg = mon_addr.base();
2661   int offset = mon_addr.disp();
2662   // compute pointer to BasicLock
2663   if (mon_addr.is_simm13()) {
2664     __ add(reg, offset, dst);
2665   } else {
2666     __ set(offset, dst);
2667     __ add(dst, reg, dst);
2668   }
2669 }
2670 
2671 
2672 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2673   Register obj = op->obj_opr()->as_register();
2674   Register hdr = op->hdr_opr()->as_register();
2675   Register lock = op->lock_opr()->as_register();
2676 
2677   // obj may not be an oop
2678   if (op->code() == lir_lock) {
2679     MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
2680     if (UseFastLocking) {
2681       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2682       // add debug info for NullPointerException only if one is possible
2683       if (op->info() != NULL) {
2684         add_debug_info_for_null_check_here(op->info());
2685       }
2686       __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
2687     } else {
2688       // always do slow locking
2689       // note: the slow locking code could be inlined here, however if we use
2690       //       slow locking, speed doesn't matter anyway and this solution is
2691       //       simpler and requires less duplicated code - additionally, the
2692       //       slow locking code is the same in either case which simplifies
2693       //       debugging
2694       __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2695       __ delayed()->nop();
2696     }
2697   } else {
2698     assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
2699     if (UseFastLocking) {
2700       assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2701       __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2702     } else {
2703       // always do slow unlocking
2704       // note: the slow unlocking code could be inlined here, however if we use
2705       //       slow unlocking, speed doesn't matter anyway and this solution is
2706       //       simpler and requires less duplicated code - additionally, the
2707       //       slow unlocking code is the same in either case which simplifies
2708       //       debugging
2709       __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2710       __ delayed()->nop();
2711     }
2712   }
2713   __ bind(*op->stub()->continuation());
2714 }
2715 
2716 
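     // Update the methodDataOop counters for a call site.  Virtual and interface
     // calls additionally record the receiver klass in one of the VirtualCallData
     // rows (statically, when the receiver type is known at compile time), or
     // bump the overall counter when all rows are occupied.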
2717 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2718   ciMethod* method = op->profiled_method();
2719   int bci          = op->profiled_bci();
2720 
2721   // Update counter for all call types
2722   ciMethodData* md = method->method_data();
2723   if (md == NULL) {
2724     bailout("out of memory building methodDataOop");
2725     return;
2726   }
2727   ciProfileData* data = md->bci_to_data(bci);
2728   assert(data->is_CounterData(), "need CounterData for calls");
2729   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2730   assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
2731   Register mdo  = op->mdo()->as_register();
2732   Register tmp1 = op->tmp1()->as_register();
2733   jobject2reg(md->constant_encoding(), mdo);
2734   int mdo_offset_bias = 0;
2735   if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
2736                             data->size_in_bytes())) {
2737     // The offset is large so bias the mdo by the base of the slot so
2738     // that the ld can use simm13s to reference the slots of the data
2739     mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
2740     __ set(mdo_offset_bias, O7);
2741     __ add(mdo, O7, mdo);
2742   }
2743 
2744   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
2745   Bytecodes::Code bc = method->java_code_at_bci(bci);
2746   // Perform additional virtual call profiling for invokevirtual and
2747   // invokeinterface bytecodes
2748   if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
2749       Tier1ProfileVirtualCalls) {
2750     assert(op->recv()->is_single_cpu(), "recv must be allocated");
2751     Register recv = op->recv()->as_register();
2752     assert_different_registers(mdo, tmp1, recv);
2753     assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
2754     ciKlass* known_klass = op->known_holder();
2755     if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
2756       // We know the type that will be seen at this call site; we can
2757       // statically update the methodDataOop rather than needing to do
2758       // dynamic tests on the receiver type
2759 
2760       // NOTE: we should probably put a lock around this search to
2761       // avoid collisions between concurrent compilations
2762       ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
2763       uint i;
2764       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2765         ciKlass* receiver = vc_data->receiver(i);
2766         if (known_klass->equals(receiver)) {
2767           Address data_addr(mdo, md->byte_offset_of_slot(data,
2768                                                          VirtualCallData::receiver_count_offset(i)) -
2769                             mdo_offset_bias);
2770           __ lduw(data_addr, tmp1);
2771           __ add(tmp1, DataLayout::counter_increment, tmp1);
2772           __ stw(tmp1, data_addr);
2773           return;
2774         }
2775       }
2776 
2777       // Receiver type not found in profile data; select an empty slot
2778 
2779       // Note that this is less efficient than it should be because it
2780       // always writes to the receiver part of the VirtualCallData
2781       // rather than doing so only the first time
2782       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2783         ciKlass* receiver = vc_data->receiver(i);
2784         if (receiver == NULL) {
2785           Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
2786                             mdo_offset_bias);
2787           jobject2reg(known_klass->constant_encoding(), tmp1);
2788           __ st_ptr(tmp1, recv_addr);
2789           Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
2790                             mdo_offset_bias);
2791           __ lduw(data_addr, tmp1);
2792           __ add(tmp1, DataLayout::counter_increment, tmp1);
2793           __ stw(tmp1, data_addr);
2794           return;
2795         }
2796       }
2797     } else {
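           // Load the receiver's klass so it can be compared against the receivers recorded in the profile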
2798       load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
2799       Label update_done;
2800       uint i;
2801       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2802         Label next_test;
2803         // See if the receiver is receiver[n].
2804         Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
2805                               mdo_offset_bias);
2806         __ ld_ptr(receiver_addr, tmp1);
2807         __ verify_oop(tmp1);
2808         __ cmp(recv, tmp1);
2809         __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
2810         __ delayed()->nop();
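             // recv matches the klass recorded in row i: bump that row's counter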
2811         Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
2812                           mdo_offset_bias);
2813         __ lduw(data_addr, tmp1);
2814         __ add(tmp1, DataLayout::counter_increment, tmp1);
2815         __ stw(tmp1, data_addr);
2816         __ br(Assembler::always, false, Assembler::pt, update_done);
2817         __ delayed()->nop();
2818         __ bind(next_test);
2819       }
2820 
2821       // Didn't find receiver; find next empty slot and fill it in
2822       for (i = 0; i < VirtualCallData::row_limit(); i++) {
2823         Label next_test;
2824         Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
2825                           mdo_offset_bias);
2826         load(recv_addr, tmp1, T_OBJECT);
2827         __ tst(tmp1);
2828         __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
2829         __ delayed()->nop();
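             // row i is empty: record the receiver's klass and give it an initial count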
2830         __ st_ptr(recv, recv_addr);
2831         __ set(DataLayout::counter_increment, tmp1);
2832         __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
2833                   mdo_offset_bias);
2834         __ br(Assembler::always, false, Assembler::pt, update_done);
2835         __ delayed()->nop();
2836         __ bind(next_test);
2837       }
2838       // Receiver did not match any saved receiver and there is no empty row for it.
2839       // Increment total counter to indicate polymorphic case.
2840       __ lduw(counter_addr, tmp1);
2841       __ add(tmp1, DataLayout::counter_increment, tmp1);
2842       __ stw(tmp1, counter_addr);
2843 
2844       __ bind(update_done);
2845     }
2846   } else {
2847     // Static call: just bump the call counter in the CounterData
2848     __ lduw(counter_addr, tmp1);
2849     __ add(tmp1, DataLayout::counter_increment, tmp1);
2850     __ stw(tmp1, counter_addr);
2851   }
2852 }
2853 
2854 
2855 void LIR_Assembler::align_backward_branch_target() {
2856   __ align(OptoLoopAlignment);
2857 }
2858 
2859 
2860 void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
2861   // make sure we are expecting a delay
2862   // this has the side effect of clearing the delay state
2863   // so we can use _masm instead of _masm->delayed() to do the
2864   // code generation.
2865   __ delayed();
2866 
2867   // make sure we only emit one instruction
2868   int offset = code_offset();
2869   op->delay_op()->emit_code(this);
2870 #ifdef ASSERT
2871   if (code_offset() - offset != NativeInstruction::nop_instruction_size) {
2872     op->delay_op()->print();
2873   }
2874   assert(code_offset() - offset == NativeInstruction::nop_instruction_size,
2875          "only one instruction can go in a delay slot");
2876 #endif
2877 
2878   // we may also be emitting the call info for the instruction
2879   // whose delay slot we are filling.
2880   CodeEmitInfo * call_info = op->call_info();
2881   if (call_info) {
2882     add_call_info(code_offset(), call_info);
2883   }
2884 
2885   if (VerifyStackAtCalls) {
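         // check that FP - SP still matches the frame size established in the prologue and trap if it does not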
2886     _masm->sub(FP, SP, O7);
2887     _masm->cmp(O7, initial_frame_size_in_bytes());
2888     _masm->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
2889   }
2890 }
2891 
2892 
2893 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
2894   assert(left->is_register(), "can only handle registers");
2895 
2896   if (left->is_single_cpu()) {
2897     __ neg(left->as_register(), dest->as_register());
2898   } else if (left->is_single_fpu()) {
2899     __ fneg(FloatRegisterImpl::S, left->as_float_reg(), dest->as_float_reg());
2900   } else if (left->is_double_fpu()) {
2901     __ fneg(FloatRegisterImpl::D, left->as_double_reg(), dest->as_double_reg());
2902   } else {
2903     assert (left->is_double_cpu(), "Must be a long");
2904     Register Rlow = left->as_register_lo();
2905     Register Rhi = left->as_register_hi();
2906 #ifdef _LP64
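         // on 64-bit SPARC the whole long lives in the lo register, so a single sub negates it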
2907     __ sub(G0, Rlow, dest->as_register_lo());
2908 #else
2909     __ subcc(G0, Rlow, dest->as_register_lo());
2910     __ subc (G0, Rhi,  dest->as_register_hi());
2911 #endif
2912   }
2913 }
2914 
2915 
2916 void LIR_Assembler::fxch(int i) {
2917   Unimplemented();
2918 }
2919 
2920 void LIR_Assembler::fld(int i) {
2921   Unimplemented();
2922 }
2923 
2924 void LIR_Assembler::ffree(int i) {
2925   Unimplemented();
2926 }
2927 
2928 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
2929                             const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2930 
2931   // if tmp is invalid, then the function being called doesn't destroy the thread
2932   if (tmp->is_valid()) {
2933     __ save_thread(tmp->as_register());
2934   }
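       // make the runtime call and fill its delay slot with a nop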
2935   __ call(dest, relocInfo::runtime_call_type);
2936   __ delayed()->nop();
2937   if (info != NULL) {
2938     add_call_info_here(info);
2939   }
2940   if (tmp->is_valid()) {
2941     __ restore_thread(tmp->as_register());
2942   }
2943 
2944 #ifdef ASSERT
2945   __ verify_thread();
2946 #endif // ASSERT
2947 }
2948 
2949 
2950 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2951 #ifdef _LP64
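       // 64-bit loads and stores are already atomic, so volatile longs never reach this special-case path on LP64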
2952   ShouldNotReachHere();
2953 #endif
2954 
2955   NEEDS_CLEANUP;
2956   if (type == T_LONG) {
2957     LIR_Address* mem_addr = dest->is_address() ? dest->as_address_ptr() : src->as_address_ptr();
2958 
2959     // (extended to allow indexed as well as constant-displacement addressing for JSR-166)
2960     Register idx = noreg; // contains either constant offset or index
2961 
2962     int disp = mem_addr->disp();
2963     if (mem_addr->index() == LIR_OprFact::illegalOpr) {
2964       if (!Assembler::is_simm13(disp)) {
2965         idx = O7;
2966         __ set(disp, idx);
2967       }
2968     } else {
2969       assert(disp == 0, "not both indexed and disp");
2970       idx = mem_addr->index()->as_register();
2971     }
2972 
2973     int null_check_offset = -1;
2974 
2975     Register base = mem_addr->base()->as_register();
2976     if (src->is_register() && dest->is_address()) {
2977       // G4 is high half, G5 is low half
2978       if (VM_Version::v9_instructions_work()) {
2979         // clear the top bits of G5, and scale up G4
2980         __ srl (src->as_register_lo(),  0, G5);
2981         __ sllx(src->as_register_hi(), 32, G4);
2982         // combine the two halves into the 64 bits of G4
2983         __ or3(G4, G5, G4);
2984         null_check_offset = __ offset();
2985         if (idx == noreg) {
2986           __ stx(G4, base, disp);
2987         } else {
2988           __ stx(G4, base, idx);
2989         }
2990       } else {
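             // pre-V9: move the halves into the even/odd pair G4/G5 and use std for an atomic 64-bit store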
2991         __ mov (src->as_register_hi(), G4);
2992         __ mov (src->as_register_lo(), G5);
2993         null_check_offset = __ offset();
2994         if (idx == noreg) {
2995           __ std(G4, base, disp);
2996         } else {
2997           __ std(G4, base, idx);
2998         }
2999       }
3000     } else if (src->is_address() && dest->is_register()) {
3001       null_check_offset = __ offset();
3002       if (VM_Version::v9_instructions_work()) {
3003         if (idx == noreg) {
3004           __ ldx(base, disp, G5);
3005         } else {
3006           __ ldx(base, idx, G5);
3007         }
3008         __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
3009         __ mov (G5, dest->as_register_lo());     // copy low half into lo
3010       } else {
3011         if (idx == noreg) {
3012           __ ldd(base, disp, G4);
3013         } else {
3014           __ ldd(base, idx, G4);
3015         }
3016         // G4 is high half, G5 is low half
3017         __ mov (G4, dest->as_register_hi());
3018         __ mov (G5, dest->as_register_lo());
3019       }
3020     } else {
3021       Unimplemented();
3022     }
3023     if (info != NULL) {
3024       add_debug_info_for_null_check(null_check_offset, info);
3025     }
3026 
3027   } else {
3028     // use normal move for all other volatiles since they don't need
3029     // special handling to remain atomic.
3030     move_op(src, dest, type, lir_patch_none, info, false, false);
3031   }
3032 }
3033 
3034 void LIR_Assembler::membar() {
3035   // only StoreLoad membars are ever explicitly needed on SPARC in TSO mode
3036   __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
3037 }
3038 
3039 void LIR_Assembler::membar_acquire() {
3040   // no-op on TSO
3041 }
3042 
3043 void LIR_Assembler::membar_release() {
3044   // no-op on TSO
3045 }
3046 
3047 // Macro to pack two sequential registers containing 32-bit values
3048 // into a single 64-bit register.
3049 // rs and rs->successor() are packed into rd.
3050 // rd and rs may be the same register.
3051 // Note: rs and rs->successor() are destroyed.
3052 void LIR_Assembler::pack64( Register rs, Register rd ) {
3053   __ sllx(rs, 32, rs);                           // shift the high word into the upper half of rs
3054   __ srl(rs->successor(), 0, rs->successor());   // zero-extend the low word
3055   __ or3(rs, rs->successor(), rd);               // combine the two halves into rd
3056 }
3057 
3058 // Macro to unpack a 64-bit value in a register into
3059 // two sequential registers.
3060 // rd is unpacked into rd and rd->successor().
3061 void LIR_Assembler::unpack64( Register rd ) {
3062   __ mov(rd, rd->successor());                   // copy the full 64-bit value into the successor
3063   __ srax(rd, 32, rd);                           // arithmetic shift leaves the high word in rd
3064   __ sra(rd->successor(), 0, rd->successor());   // sign-extend the low word in place
3065 }
3066 
3067 
3068 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
3069   LIR_Address* addr = addr_opr->as_address_ptr();
3070   assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
3071   __ add(addr->base()->as_register(), addr->disp(), dest->as_register());
3072 }
3073 
3074 
3075 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3076   assert(result_reg->is_register(), "check");
3077   __ mov(G2_thread, result_reg->as_register());
3078 }
3079 
3080 
3081 void LIR_Assembler::peephole(LIR_List* lir) {
3082   LIR_OpList* inst = lir->instructions_list();
3083   for (int i = 0; i < inst->length(); i++) {
3084     LIR_Op* op = inst->at(i);
3085     switch (op->code()) {
3086       case lir_cond_float_branch:
3087       case lir_branch: {
3088         LIR_OpBranch* branch = op->as_OpBranch();
3089         assert(branch->info() == NULL, "shouldn't be state on branches anymore");
3090         LIR_Op* delay_op = NULL;
3091         // we'd like to be able to pull following instructions into
3092         // this slot, but we don't know enough to do it safely yet, so
3093         // we only optimize block-to-block control flow.
3094         if (LIRFillDelaySlots && branch->block()) {
3095           LIR_Op* prev = inst->at(i - 1);
3096           if (prev && LIR_Assembler::is_single_instruction(prev) && prev->info() == NULL) {
3097             // swap previous instruction into delay slot
3098             inst->at_put(i - 1, op);
3099             inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3100 #ifndef PRODUCT
3101             if (LIRTracePeephole) {
3102               tty->print_cr("delayed");
3103               inst->at(i - 1)->print();
3104               inst->at(i)->print();
3105             }
3106 #endif
3107             continue;
3108           }
3109         }
3110 
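             // nothing suitable was found, so fill the branch's delay slot with an explicit nop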
3111         if (!delay_op) {
3112           delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), NULL);
3113         }
3114         inst->insert_before(i + 1, delay_op);
3115         break;
3116       }
3117       case lir_static_call:
3118       case lir_virtual_call:
3119       case lir_icvirtual_call:
3120       case lir_optvirtual_call: {
3121         LIR_Op* delay_op = NULL;
3122         LIR_Op* prev = inst->at(i - 1);
3123         if (LIRFillDelaySlots && prev && prev->code() == lir_move && prev->info() == NULL &&
3124             (op->code() != lir_virtual_call ||
3125              !prev->result_opr()->is_single_cpu() ||
3126              prev->result_opr()->as_register() != O0) &&
3127             LIR_Assembler::is_single_instruction(prev)) {
3128           // Only moves without info can be put into the delay slot.
3129           // Also don't allow the setup of the receiver in the delay
3130           // slot for vtable calls.
3131           inst->at_put(i - 1, op);
3132           inst->at_put(i, new LIR_OpDelay(prev, op->info()));
3133 #ifndef PRODUCT
3134           if (LIRTracePeephole) {
3135             tty->print_cr("delayed");
3136             inst->at(i - 1)->print();
3137             inst->at(i)->print();
3138           }
3139 #endif
3140           continue;
3141         }
3142 
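             // nothing suitable was found, so fill the call's delay slot with a nop carrying the call's debug info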
3143         if (!delay_op) {
3144           delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
3145           inst->insert_before(i + 1, delay_op);
3146         }
3147         break;
3148       }
3149     }
3150   }
3151 }
3152 
3153 
3154 
3155 
3156 #undef __