< prev index next >

src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp

Print this page




  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/cardTableBarrierSet.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/objArrayKlass.hpp"

  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/sharedRuntime.hpp"
  45 #include "vmreg_aarch64.inline.hpp"
  46 
  47 
  48 
  49 #ifndef PRODUCT
  50 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  51 #else
  52 #define COMMENT(x)
  53 #endif
  54 
  55 NEEDS_CLEANUP // remove these definitions?
  56 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  57 const Register SYNC_header = r0;   // synchronization header
  58 const Register SHIFT_count = r0;   // where count for shift operations must be
  59 
  60 #define __ _masm->
  61 
  62 


 225   // FIXME: This needs to be much more clever.  See x86.
 226 }
 227 
 228 
 229 void LIR_Assembler::osr_entry() {
 230   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 231   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 232   ValueStack* entry_state = osr_entry->state();
 233   int number_of_locks = entry_state->locks_size();
 234 
 235   // we jump here if osr happens with the interpreter
 236   // state set up to continue at the beginning of the
 237   // loop that triggered osr - in particular, we have
 238   // the following registers setup:
 239   //
 240   // r2: osr buffer
 241   //
 242 
 243   // build frame
 244   ciMethod* m = compilation()->method();
 245   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
 246 
 247   // OSR buffer is
 248   //
 249   // locals[nlocals-1..0]
 250   // monitors[0..number_of_locks]
 251   //
 252   // locals is a direct copy of the interpreter frame so in the osr buffer
 253   // so first slot in the local array is the last local from the interpreter
 254   // and last slot is local[0] (receiver) from the interpreter
 255   //
 256   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 257   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 258   // in the interpreter frame (the method lock if a sync method)
 259 
 260   // Initialize monitors in the compiled activation.
 261   //   r2: pointer to osr buffer
 262   //
 263   // All other registers are dead at this point and the locals will be
 264   // copied into place by code emitted in the IR.
 265 


 435     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 436     __ unlock_object(r5, r4, r0, *stub->entry());
 437     __ bind(*stub->continuation());
 438   }
 439 
 440   if (compilation()->env()->dtrace_method_probes()) {
 441     __ call_Unimplemented();
 442 #if 0
 443     __ movptr(Address(rsp, 0), rax);
 444     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 445     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 446 #endif
 447   }
 448 
 449   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 450     __ mov(r0, r19);  // Restore the exception
 451   }
 452 
 453   // remove the activation and dispatch to the unwind handler
 454   __ block_comment("remove_frame and dispatch to the unwind handler");
 455   __ remove_frame(initial_frame_size_in_bytes());
 456   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 457 
 458   // Emit the slow path assembly
 459   if (stub != NULL) {
 460     stub->emit_code(this);
 461   }
 462 
 463   return offset;
 464 }
 465 
 466 
 467 int LIR_Assembler::emit_deopt_handler() {
 468   // if the last instruction is a call (typically to do a throw which
 469   // is coming at the end after block reordering) the return address
 470   // must still point into the code area in order to avoid assertion
 471   // failures when searching for the corresponding bci => add a nop
 472   // (was bug 5/14/1999 - gri)
 473   __ nop();
 474 
 475   // generate code for exception handler


 486   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 487   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 488   __ end_a_stub();
 489 
 490   return offset;
 491 }
 492 
// Attach debug information to the already-emitted instruction at 'adr'.
// The instruction is tagged with a poll_type relocation and the debug
// info in 'info' (plus any exception handlers it carries) is recorded
// against the current code offset.
 493 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 494   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 495   int pc_offset = code_offset();
 496   flush_debug_info(pc_offset);
 497   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
 498   if (info->exception_handlers() != NULL) {
 499     // Register the handlers for this pc offset with the compilation.
 500     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 501   }
 502 }
 502 
// Emit the method return sequence: tear down the frame, perform the
// return-type safepoint poll, then return to the caller through lr.
// 'result' is only checked (word-sized results must be in r0); no code
// is emitted for it.
 503 void LIR_Assembler::return_op(LIR_Opr result) {
 504   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 505 

 506   // Pop the stack before the safepoint code
 507   __ remove_frame(initial_frame_size_in_bytes());
 508 
 509   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 510     __ reserved_stack_check();
 511   }
 512 
 513   address polling_page(os::get_polling_page());
 514   // Return-type safepoint poll (relocInfo::poll_return_type).
 515   __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
 516   __ ret(lr);
 517 }
 517 




// Emit a safepoint poll: load the polling page address and read from it
// with a poll_type relocation.  Returns the code offset after the poll.
// The debug info must be recorded *before* the poll read so the oop map
// is associated with the polling instruction.
 518 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 519   address polling_page(os::get_polling_page());
 520   guarantee(info != NULL, "Shouldn't be NULL");
 521   assert(os::is_poll_address(polling_page), "should be");
 522   __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
 523   add_debug_info_for_branch(info);  // This isn't just debug info:
 524                                     // it's the oop map
 525   __ read_polling_page(rscratch1, relocInfo::poll_type);
 526   return __ offset();
 527 }
 528 
 529 
 530 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 531   if (from_reg == r31_sp)
 532     from_reg = sp;
 533   if (to_reg == r31_sp)
 534     to_reg = sp;
 535   __ mov(to_reg, from_reg);
 536 }
 537 


 545 
 546   switch (c->type()) {
 547     case T_INT: {
 548       assert(patch_code == lir_patch_none, "no patching handled here");
 549       __ movw(dest->as_register(), c->as_jint());
 550       break;
 551     }
 552 
 553     case T_ADDRESS: {
 554       assert(patch_code == lir_patch_none, "no patching handled here");
 555       __ mov(dest->as_register(), c->as_jint());
 556       break;
 557     }
 558 
 559     case T_LONG: {
 560       assert(patch_code == lir_patch_none, "no patching handled here");
 561       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 562       break;
 563     }
 564 

 565     case T_OBJECT: {
 566         if (patch_code == lir_patch_none) {
 567           jobject2reg(c->as_jobject(), dest->as_register());
 568         } else {
 569           jobject2reg_with_patching(dest->as_register(), info);


 570         }
 571       break;
 572     }
 573 
 574     case T_METADATA: {
 575       if (patch_code != lir_patch_none) {
 576         klass2reg_with_patching(dest->as_register(), info);
 577       } else {
 578         __ mov_metadata(dest->as_register(), c->as_metadata());
 579       }
 580       break;
 581     }
 582 
 583     case T_FLOAT: {
 584       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 585         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 586       } else {
 587         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 588         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 589       }


 591     }
 592 
 593     case T_DOUBLE: {
 594       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 595         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 596       } else {
 597         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 598         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 599       }
 600       break;
 601     }
 602 
 603     default:
 604       ShouldNotReachHere();
 605   }
 606 }
 607 
 608 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 609   LIR_Const* c = src->as_constant_ptr();
 610   switch (c->type()) {

 611   case T_OBJECT:
 612     {
 613       if (! c->as_jobject())
 614         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 615       else {
 616         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 617         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 618       }
 619     }
 620     break;
 621   case T_ADDRESS:
 622     {
 623       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 624       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 625     }
 626   case T_INT:
 627   case T_FLOAT:
 628     {
 629       Register reg = zr;
 630       if (c->as_jint_bits() == 0)


// Store a constant directly to memory.  Only the zero constant is
// supported: every case asserts the constant is 0 and the actual store
// always writes zr, with the store width selected by 'type'.
// 'info', if present, marks this store as an implicit null check site.
 657 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 658   assert(src->is_constant(), "should not call otherwise");
 659   LIR_Const* c = src->as_constant_ptr();
 660   LIR_Address* to_addr = dest->as_address_ptr();
 661 
 662   // Member-function pointer selecting the appropriately sized store.
 663   void (Assembler::* insn)(Register Rt, const Address &adr);
 664 
 665   switch (type) {
 666   case T_ADDRESS:
 667     assert(c->as_jint() == 0, "should be");
 668     insn = &Assembler::str;
 669     break;
 670   case T_LONG:
 671     assert(c->as_jlong() == 0, "should be");
 672     insn = &Assembler::str;
 673     break;
 674   case T_INT:
 675     assert(c->as_jint() == 0, "should be");
 676     insn = &Assembler::strw;
 677     break;

 678   case T_OBJECT:
 679   case T_ARRAY:
 680     assert(c->as_jobject() == 0, "should be");
 681     if (UseCompressedOops && !wide) {
 682       insn = &Assembler::strw;  // narrow (compressed) oop: 32-bit store
 683     } else {
 684       insn = &Assembler::str;
 685     }
 686     break;
 687   case T_CHAR:
 688   case T_SHORT:
 689     assert(c->as_jint() == 0, "should be");
 690     insn = &Assembler::strh;
 691     break;
 692   case T_BOOLEAN:
 693   case T_BYTE:
 694     assert(c->as_jint() == 0, "should be");
 695     insn = &Assembler::strb;
 696     break;
 697   default:
 698     ShouldNotReachHere();
 699     insn = &Assembler::str;  // unreachable
 700   }
 701 
 702   if (info) add_debug_info_for_null_check_here(info);
 703   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 704 }
 704 
// Move a value between registers (cpu-cpu or fpu-fpu).  Oop sources are
// verified before the move.  Note: on aarch64 a long lives in a single
// 64-bit register, so the "double cpu" hi/lo registers must be the same.
 705 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 706   assert(src->is_register(), "should not call otherwise");
 707   assert(dest->is_register(), "should not call otherwise");
 708 
 709   // move between cpu-registers
 710   if (dest->is_single_cpu()) {
 711     if (src->type() == T_LONG) {
 712       // Can do LONG -> OBJECT
 713       move_regs(src->as_register_lo(), dest->as_register());
 714       return;
 715     }
 716     assert(src->is_single_cpu(), "must match");
 717     if (src->type() == T_OBJECT) {
 718       __ verify_oop(src->as_register());
 719     }
 720     move_regs(src->as_register(), dest->as_register());
 721 
 722   } else if (dest->is_double_cpu()) {
 723     if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
 724       // Surprising to me but we can see move of a long to t_object
 725       __ verify_oop(src->as_register());
 726       move_regs(src->as_register(), dest->as_register_lo());
 727       return;
 728     }
 729     assert(src->is_double_cpu(), "must match");
 730     Register f_lo = src->as_register_lo();
 731     Register f_hi = src->as_register_hi();
 732     Register t_lo = dest->as_register_lo();
 733     Register t_hi = dest->as_register_hi();
 734     // hi == lo on aarch64: a two-word LIR operand occupies one register.
 735     assert(f_hi == f_lo, "must be same");
 736     assert(t_hi == t_lo, "must be same");
 737     move_regs(f_lo, t_lo);
 738 
 739   } else if (dest->is_single_fpu()) {
 740     __ fmovs(dest->as_float_reg(), src->as_float_reg());
 741 
 742   } else if (dest->is_double_fpu()) {
 743     __ fmovd(dest->as_double_reg(), src->as_double_reg());
 744 
 745   } else {
 746     ShouldNotReachHere();
 747   }
 748 }
 748 
// Spill a register into a stack slot of the current frame, choosing the
// store width from 'type' (64-bit for oops/metadata/longs/doubles,
// 32-bit otherwise).  pop_fpu_stack is unused on aarch64.
 749 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 750   if (src->is_single_cpu()) {
 751     if (type == T_ARRAY || type == T_OBJECT) {
 752       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 753       __ verify_oop(src->as_register());
 754     } else if (type == T_METADATA || type == T_DOUBLE) {
 755       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 756     } else {
 757       __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 758     }
 759 
 760   } else if (src->is_double_cpu()) {
 761     // One 64-bit store suffices: a long occupies a single register here.
 762     Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 763     __ str(src->as_register_lo(), dest_addr_LO);
 764 
 765   } else if (src->is_single_fpu()) {
 766     Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 767     __ strs(src->as_float_reg(), dest_addr);
 768 
 769   } else if (src->is_double_fpu()) {
 770     Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 771     __ strd(src->as_double_reg(), dest_addr);
 772 
 773   } else {
 774     ShouldNotReachHere();
 775   }
 776 
 777 }
 777 
 778 
// Store a register to memory.  A patched access is handled by emitting a
// deoptimization trap instead.  Oops are verified and, with compressed
// oops enabled (and not 'wide'), encoded into rscratch1 before the store.
// If 'info' is given, the store is recorded as an implicit null check.
 779 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 780   LIR_Address* to_addr = dest->as_address_ptr();
 781   // NOTE(review): 'patch' is never used below; patched accesses bail out
 782   // through deoptimize_trap() instead.
 783   PatchingStub* patch = NULL;
 784   Register compressed_src = rscratch1;
 785 
 786   if (patch_code != lir_patch_none) {
 787     deoptimize_trap(info);
 788     return;
 789   }
 790 
 791   if (type == T_ARRAY || type == T_OBJECT) {
 792     __ verify_oop(src->as_register());
 793 
 794     if (UseCompressedOops && !wide) {
 795       __ encode_heap_oop(compressed_src, src->as_register());
 796     } else {
 797       compressed_src = src->as_register();
 798     }
 799   }
 800 
 801   // Offset of the (first) store; used for the implicit null check below.
 802   int null_check_here = code_offset();
 803   switch (type) {
 804     case T_FLOAT: {
 805       __ strs(src->as_float_reg(), as_Address(to_addr));
 806       break;
 807     }
 808 
 809     case T_DOUBLE: {
 810       __ strd(src->as_double_reg(), as_Address(to_addr));
 811       break;
 812     }
 813 

 814     case T_ARRAY:   // fall through
 815     case T_OBJECT:  // fall through
 816       if (UseCompressedOops && !wide) {
 817         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 818       } else {
 819          __ str(compressed_src, as_Address(to_addr));
 820       }
 821       break;
 822     case T_METADATA:
 823       // We get here to store a method pointer to the stack to pass to
 824       // a dtrace runtime call. This can't work on 64 bit with
 825       // compressed klass ptrs: T_METADATA can be a compressed klass
 826       // ptr or a 64 bit method pointer.
 827       ShouldNotReachHere();
 828       __ str(src->as_register(), as_Address(to_addr));
 829       break;
 830     case T_ADDRESS:
 831       __ str(src->as_register(), as_Address(to_addr));
 832       break;
 833     case T_INT:


 844 
 845     case T_CHAR:    // fall through
 846     case T_SHORT:
 847       __ strh(src->as_register(), as_Address(to_addr));
 848       break;
 849 
 850     default:
 851       ShouldNotReachHere();
 852   }
 853   if (info != NULL) {
 854     add_debug_info_for_null_check(null_check_here, info);
 855   }
 856 }
 857 
 858 
 859 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 860   assert(src->is_stack(), "should not call otherwise");
 861   assert(dest->is_register(), "should not call otherwise");
 862 
 863   if (dest->is_single_cpu()) {
 864     if (type == T_ARRAY || type == T_OBJECT) {
 865       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 866       __ verify_oop(dest->as_register());
 867     } else if (type == T_METADATA) {
 868       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 869     } else {
 870       __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 871     }
 872 
 873   } else if (dest->is_double_cpu()) {
 874     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
 875     __ ldr(dest->as_register_lo(), src_addr_LO);
 876 
 877   } else if (dest->is_single_fpu()) {
 878     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
 879     __ ldrs(dest->as_float_reg(), src_addr);
 880 
 881   } else if (dest->is_double_fpu()) {
 882     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
 883     __ ldrd(dest->as_double_reg(), src_addr);
 884 


 916   add_call_info_here(info);
 917 }
 918 
 919 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 920 
 921   LIR_Opr temp;
 922   if (type == T_LONG || type == T_DOUBLE)
 923     temp = FrameMap::rscratch1_long_opr;
 924   else
 925     temp = FrameMap::rscratch1_opr;
 926 
 927   stack2reg(src, temp, src->type());
 928   reg2stack(temp, dest, dest->type(), false);
 929 }
 930 
 931 
// Load a value from memory into a register.  A patched access bails out
// through a deoptimization trap.  If 'info' is present, the load is
// recorded as an implicit null check site.  Compressed oops are decoded
// after the load, and a klass load through klass_offset_in_bytes() is
// decoded with decode_klass_not_null when compressed class pointers are
// in use.
 932 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
 933   // Both names alias the same LIR_Address of the source operand.
 934   LIR_Address* addr = src->as_address_ptr();
 935   LIR_Address* from_addr = src->as_address_ptr();
 936 
 937   if (addr->base()->type() == T_OBJECT) {
 938     __ verify_oop(addr->base()->as_pointer_register());
 939   }
 940 
 941   if (patch_code != lir_patch_none) {
 942     deoptimize_trap(info);
 943     return;
 944   }
 945 
 946   if (info != NULL) {
 947     add_debug_info_for_null_check_here(info);
 948   }
 949   // NOTE(review): null_check_here is currently unused; the debug info
 950   // was already recorded above.
 951   int null_check_here = code_offset();
 952   switch (type) {
 953     case T_FLOAT: {
 954       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 955       break;
 956     }
 957 
 958     case T_DOUBLE: {
 959       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 960       break;
 961     }
 962 

 963     case T_ARRAY:   // fall through
 964     case T_OBJECT:  // fall through
 965       if (UseCompressedOops && !wide) {
 966         __ ldrw(dest->as_register(), as_Address(from_addr));
 967       } else {
 968          __ ldr(dest->as_register(), as_Address(from_addr));
 969       }
 970       break;
 971     case T_METADATA:
 972       // (Comment mirrors the store path in reg2mem; this load path is
 973       // likewise unreachable.)
 974       // We get here to store a method pointer to the stack to pass to
 975       // a dtrace runtime call. This can't work on 64 bit with
 976       // compressed klass ptrs: T_METADATA can be a compressed klass
 977       // ptr or a 64 bit method pointer.
 978       ShouldNotReachHere();
 979       __ ldr(dest->as_register(), as_Address(from_addr));
 980       break;
 981     case T_ADDRESS:
 982       // FIXME: OMG this is a horrible kludge.  Any offset from an
 983       // address that matches klass_offset_in_bytes() will be loaded
 984       // as a word, not a long.


 994 
 995     case T_BYTE:
 996       __ ldrsb(dest->as_register(), as_Address(from_addr));
 997       break;
 998     case T_BOOLEAN: {
 999       __ ldrb(dest->as_register(), as_Address(from_addr));
1000       break;
1001     }
1002 
1003     case T_CHAR:
1004       __ ldrh(dest->as_register(), as_Address(from_addr));
1005       break;
1006     case T_SHORT:
1007       __ ldrsh(dest->as_register(), as_Address(from_addr));
1008       break;
1009 
1010     default:
1011       ShouldNotReachHere();
1012   }
1013 
1014   if (type == T_ARRAY || type == T_OBJECT) {
1015     if (UseCompressedOops && !wide) {
1016       __ decode_heap_oop(dest->as_register());
1017     }
1018 
1019     if (!UseZGC) {
1020       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1021       __ verify_oop(dest->as_register());
1022     }
1023   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1024     if (UseCompressedClassPointers) {

1025       __ decode_klass_not_null(dest->as_register());


1026     }
1027   }
1028 }
1029 














1030 
1031 int LIR_Assembler::array_element_size(BasicType type) const {
1032   int elem_size = type2aelembytes(type);
1033   return exact_log2(elem_size);
1034 }
1035 
1036 
1037 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1038   switch (op->code()) {
1039   case lir_idiv:
1040   case lir_irem:
1041     arithmetic_idiv(op->code(),
1042                     op->in_opr1(),
1043                     op->in_opr2(),
1044                     op->in_opr3(),
1045                     op->result_opr(),
1046                     op->info());
1047     break;
1048   case lir_fmad:
1049     __ fmaddd(op->result_opr()->as_double_reg(),


1201     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1202                                InstanceKlass::init_state_offset()));
1203     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1204     add_debug_info_for_null_check_here(op->stub()->info());
1205     __ br(Assembler::NE, *op->stub()->entry());
1206   }
1207   __ allocate_object(op->obj()->as_register(),
1208                      op->tmp1()->as_register(),
1209                      op->tmp2()->as_register(),
1210                      op->header_size(),
1211                      op->object_size(),
1212                      op->klass()->as_register(),
1213                      *op->stub()->entry());
1214   __ bind(*op->stub()->continuation());
1215 }
1216 
1217 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1218   Register len =  op->len()->as_register();
1219   __ uxtw(len, len);
1220 
1221   if (UseSlowPath ||
1222       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1223       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1224     __ b(*op->stub()->entry());
1225   } else {
1226     Register tmp1 = op->tmp1()->as_register();
1227     Register tmp2 = op->tmp2()->as_register();
1228     Register tmp3 = op->tmp3()->as_register();
1229     if (len == tmp1) {
1230       tmp1 = tmp3;
1231     } else if (len == tmp2) {
1232       tmp2 = tmp3;
1233     } else if (len == tmp3) {
1234       // everything is ok
1235     } else {
1236       __ mov(tmp3, len);
1237     }
1238     __ allocate_array(op->obj()->as_register(),
1239                       len,
1240                       tmp1,
1241                       tmp2,


1513     __ bind(success);
1514     if (dst != obj) {
1515       __ mov(dst, obj);
1516     }
1517   } else if (code == lir_instanceof) {
1518     Register obj = op->object()->as_register();
1519     Register dst = op->result_opr()->as_register();
1520     Label success, failure, done;
1521     emit_typecheck_helper(op, &success, &failure, &failure);
1522     __ bind(failure);
1523     __ mov(dst, zr);
1524     __ b(done);
1525     __ bind(success);
1526     __ mov(dst, 1);
1527     __ bind(done);
1528   } else {
1529     ShouldNotReachHere();
1530   }
1531 }
1532 




















































































































// 32-bit compare-and-swap of *addr: if *addr == cmpval, store newval.
// rscratch1 is set to 1 on failure, 0 on success (cmpxchg is assumed to
// set EQ on success — confirm against MacroAssembler::cmpxchg).  A full
// AnyAny barrier follows the exchange.
1533 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1534   __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1535   __ cset(rscratch1, Assembler::NE);
1536   __ membar(__ AnyAny);
1537 }
1538 
// 64-bit compare-and-swap of *addr: if *addr == cmpval, store newval.
// rscratch1 is set to 1 on failure, 0 on success (cmpxchg is assumed to
// set EQ on success — confirm against MacroAssembler::cmpxchg).  A full
// AnyAny barrier follows the exchange.
1539 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1540   __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1541   __ cset(rscratch1, Assembler::NE);
1542   __ membar(__ AnyAny);
1543 }
1544 
1545 
1546 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1547   assert(VM_Version::supports_cx8(), "wrong machine");
1548   Register addr;
1549   if (op->addr()->is_register()) {
1550     addr = as_reg(op->addr());
1551   } else {
1552     assert(op->addr()->is_address(), "what else?");


1923       }
1924     }
1925   } else {
1926     Register rreg = right->as_register();
1927     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1928   }
1929 }
1930 
1931 
1932 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1933   if (opr1->is_constant() && opr2->is_single_cpu()) {
1934     // tableswitch
1935     Register reg = as_reg(opr2);
1936     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1937     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1938   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1939     Register reg1 = as_reg(opr1);
1940     if (opr2->is_single_cpu()) {
1941       // cpu register - cpu register
1942       Register reg2 = opr2->as_register();
1943       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1944         __ cmpoop(reg1, reg2);
1945       } else {
1946         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
1947         __ cmpw(reg1, reg2);
1948       }
1949       return;
1950     }
1951     if (opr2->is_double_cpu()) {
1952       // cpu register - cpu register
1953       Register reg2 = opr2->as_register_lo();
1954       __ cmp(reg1, reg2);
1955       return;
1956     }
1957 
1958     if (opr2->is_constant()) {
1959       bool is_32bit = false; // width of register operand
1960       jlong imm;
1961 
1962       switch(opr2->type()) {
1963       case T_INT:
1964         imm = opr2->as_constant_ptr()->as_jint();
1965         is_32bit = true;
1966         break;
1967       case T_LONG:
1968         imm = opr2->as_constant_ptr()->as_jlong();
1969         break;
1970       case T_ADDRESS:
1971         imm = opr2->as_constant_ptr()->as_jint();
1972         break;

1973       case T_OBJECT:
1974       case T_ARRAY:
1975         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
1976         __ cmpoop(reg1, rscratch1);
1977         return;
1978       default:
1979         ShouldNotReachHere();
1980         imm = 0;  // unreachable
1981         break;
1982       }
1983 
1984       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
1985         if (is_32bit)
1986           __ cmpw(reg1, imm);
1987         else
1988           __ subs(zr, reg1, imm);
1989         return;
1990       } else {
1991         __ mov(rscratch1, imm);
1992         if (is_32bit)


2119   __ b(_unwind_handler_entry);
2120 }
2121 
2122 
2123 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2124   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2125   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2126 
2127   switch (left->type()) {
2128     case T_INT: {
2129       switch (code) {
2130       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2131       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2132       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2133       default:
2134         ShouldNotReachHere();
2135         break;
2136       }
2137       break;
2138     case T_LONG:

2139     case T_ADDRESS:
2140     case T_OBJECT:
2141       switch (code) {
2142       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2143       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2144       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2145       default:
2146         ShouldNotReachHere();
2147         break;
2148       }
2149       break;
2150     default:
2151       ShouldNotReachHere();
2152       break;
2153     }
2154   }
2155 }
2156 
2157 
2158 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2159   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2160   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2161 
2162   switch (left->type()) {
2163     case T_INT: {
2164       switch (code) {
2165       case lir_shl:  __ lslw (dreg, lreg, count); break;
2166       case lir_shr:  __ asrw (dreg, lreg, count); break;
2167       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2168       default:
2169         ShouldNotReachHere();
2170         break;
2171       }
2172       break;
2173     case T_LONG:
2174     case T_ADDRESS:

2175     case T_OBJECT:
2176       switch (code) {
2177       case lir_shl:  __ lsl (dreg, lreg, count); break;
2178       case lir_shr:  __ asr (dreg, lreg, count); break;
2179       case lir_ushr: __ lsr (dreg, lreg, count); break;
2180       default:
2181         ShouldNotReachHere();
2182         break;
2183       }
2184       break;
2185     default:
2186       ShouldNotReachHere();
2187       break;
2188     }
2189   }
2190 }
2191 
2192 
2193 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2194   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");


2199 
2200 
2201 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2202   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2203   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2204   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2205   __ mov (rscratch1, c);
2206   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2207 }
2208 
2209 
2210 void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
2211   ShouldNotReachHere();
2212   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2213   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2214   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2215   __ lea(rscratch1, __ constant_oop_address(o));
2216   __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2217 }
2218 













2219 
2220 // This code replaces a call to arraycopy; no exception may
2221 // be thrown in this code, they must be thrown in the System.arraycopy
2222 // activation frame; we could save some checks if this would not be the case
2223 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2224   ciArrayKlass* default_type = op->expected_type();
2225   Register src = op->src()->as_register();
2226   Register dst = op->dst()->as_register();
2227   Register src_pos = op->src_pos()->as_register();
2228   Register dst_pos = op->dst_pos()->as_register();
2229   Register length  = op->length()->as_register();
2230   Register tmp = op->tmp()->as_register();
2231 
2232   __ resolve(ACCESS_READ, src);
2233   __ resolve(ACCESS_WRITE, dst);
2234 
2235   CodeStub* stub = op->stub();
2236   int flags = op->flags();
2237   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2238   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
















2239 
2240   // if we don't know anything, just go through the generic arraycopy
2241   if (default_type == NULL // || basic_type == T_OBJECT
2242       ) {
2243     Label done;
2244     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2245 
2246     // Save the arguments in case the generic arraycopy fails and we
2247     // have to fall back to the JNI stub
2248     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2249     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2250     __ str(src,              Address(sp, 4*BytesPerWord));
2251 
2252     address copyfunc_addr = StubRoutines::generic_arraycopy();
2253     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2254 
2255     // The arguments are in java calling convention so we shift them
2256     // to C convention
2257     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2258     __ mov(c_rarg0, j_rarg0);


2887 
2888 
2889 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2890   assert(!tmp->is_valid(), "don't need temporary");
2891 
2892   CodeBlob *cb = CodeCache::find_blob(dest);
2893   if (cb) {
2894     __ far_call(RuntimeAddress(dest));
2895   } else {
2896     __ mov(rscratch1, RuntimeAddress(dest));
2897     int len = args->length();
2898     int type = 0;
2899     if (! result->is_illegal()) {
2900       switch (result->type()) {
2901       case T_VOID:
2902         type = 0;
2903         break;
2904       case T_INT:
2905       case T_LONG:
2906       case T_OBJECT:

2907         type = 1;
2908         break;
2909       case T_FLOAT:
2910         type = 2;
2911         break;
2912       case T_DOUBLE:
2913         type = 3;
2914         break;
2915       default:
2916         ShouldNotReachHere();
2917         break;
2918       }
2919     }
2920     int num_gpargs = 0;
2921     int num_fpargs = 0;
2922     for (int i = 0; i < args->length(); i++) {
2923       LIR_Opr arg = args->at(i);
2924       if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) {
2925         num_fpargs++;
2926       } else {


3153 #endif
3154 }
3155 
3156 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3157   Address addr = as_Address(src->as_address_ptr());
3158   BasicType type = src->type();
3159   bool is_oop = type == T_OBJECT || type == T_ARRAY;
3160 
3161   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3162   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3163 
3164   switch(type) {
3165   case T_INT:
3166     xchg = &MacroAssembler::atomic_xchgalw;
3167     add = &MacroAssembler::atomic_addalw;
3168     break;
3169   case T_LONG:
3170     xchg = &MacroAssembler::atomic_xchgal;
3171     add = &MacroAssembler::atomic_addal;
3172     break;

3173   case T_OBJECT:
3174   case T_ARRAY:
3175     if (UseCompressedOops) {
3176       xchg = &MacroAssembler::atomic_xchgalw;
3177       add = &MacroAssembler::atomic_addalw;
3178     } else {
3179       xchg = &MacroAssembler::atomic_xchgal;
3180       add = &MacroAssembler::atomic_addal;
3181     }
3182     break;
3183   default:
3184     ShouldNotReachHere();
3185     xchg = &MacroAssembler::atomic_xchgal;
3186     add = &MacroAssembler::atomic_addal; // unreachable
3187   }
3188 
3189   switch (code) {
3190   case lir_xadd:
3191     {
3192       RegisterOrConstant inc;




  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "asm/assembler.hpp"
  29 #include "c1/c1_CodeStubs.hpp"
  30 #include "c1/c1_Compilation.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_MacroAssembler.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArrayKlass.hpp"
  36 #include "ci/ciInstance.hpp"
  37 #include "code/compiledIC.hpp"
  38 #include "gc/shared/barrierSet.hpp"
  39 #include "gc/shared/cardTableBarrierSet.hpp"
  40 #include "gc/shared/collectedHeap.hpp"
  41 #include "nativeInst_aarch64.hpp"
  42 #include "oops/objArrayKlass.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/frame.inline.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "vmreg_aarch64.inline.hpp"
  47 
  48 
  49 
  50 #ifndef PRODUCT
  51 #define COMMENT(x)   do { __ block_comment(x); } while (0)
  52 #else
  53 #define COMMENT(x)
  54 #endif
  55 
  56 NEEDS_CLEANUP // remove these definitions?
  57 const Register IC_Klass    = rscratch2;   // where the IC klass is cached
  58 const Register SYNC_header = r0;   // synchronization header
  59 const Register SHIFT_count = r0;   // where count for shift operations must be
  60 
  61 #define __ _masm->
  62 
  63 


 226   // FIXME: This needs to be much more clever.  See x86.
 227 }
 228 
 229 
 230 void LIR_Assembler::osr_entry() {
 231   offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
 232   BlockBegin* osr_entry = compilation()->hir()->osr_entry();
 233   ValueStack* entry_state = osr_entry->state();
 234   int number_of_locks = entry_state->locks_size();
 235 
 236   // we jump here if osr happens with the interpreter
 237   // state set up to continue at the beginning of the
 238   // loop that triggered osr - in particular, we have
 239   // the following registers setup:
 240   //
 241   // r2: osr buffer
 242   //
 243 
 244   // build frame
 245   ciMethod* m = compilation()->method();
 246   __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), needs_stack_repair(), NULL); 
 247 
 248   // OSR buffer is
 249   //
 250   // locals[nlocals-1..0]
 251   // monitors[0..number_of_locks]
 252   //
 253   // locals is a direct copy of the interpreter frame so in the osr buffer
 254   // so first slot in the local array is the last local from the interpreter
 255   // and last slot is local[0] (receiver) from the interpreter
 256   //
 257   // Similarly with locks. The first lock slot in the osr buffer is the nth lock
 258   // from the interpreter frame, the nth lock slot in the osr buffer is 0th lock
 259   // in the interpreter frame (the method lock if a sync method)
 260 
 261   // Initialize monitors in the compiled activation.
 262   //   r2: pointer to osr buffer
 263   //
 264   // All other registers are dead at this point and the locals will be
 265   // copied into place by code emitted in the IR.
 266 


 436     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 437     __ unlock_object(r5, r4, r0, *stub->entry());
 438     __ bind(*stub->continuation());
 439   }
 440 
 441   if (compilation()->env()->dtrace_method_probes()) {
 442     __ call_Unimplemented();
 443 #if 0
 444     __ movptr(Address(rsp, 0), rax);
 445     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 446     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 447 #endif
 448   }
 449 
 450   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 451     __ mov(r0, r19);  // Restore the exception
 452   }
 453 
 454   // remove the activation and dispatch to the unwind handler
 455   __ block_comment("remove_frame and dispatch to the unwind handler");
 456   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 457   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 458 
 459   // Emit the slow path assembly
 460   if (stub != NULL) {
 461     stub->emit_code(this);
 462   }
 463 
 464   return offset;
 465 }
 466 
 467 
 468 int LIR_Assembler::emit_deopt_handler() {
 469   // if the last instruction is a call (typically to do a throw which
 470   // is coming at the end after block reordering) the return address
 471   // must still point into the code area in order to avoid assertion
 472   // failures when searching for the corresponding bci => add a nop
 473   // (was bug 5/14/1999 - gri)
 474   __ nop();
 475 
 476   // generate code for exception handler


 487   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
 488   guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
 489   __ end_a_stub();
 490 
 491   return offset;
 492 }
 493 
     // Attach debug info to the branch instruction already emitted at adr,
     // and mark that site as a safepoint poll for the relocator. The debug
     // state (including the oop map) is recorded at the current code offset.
 494 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
 495   _masm->code_section()->relocate(adr, relocInfo::poll_type);
 496   int pc_offset = code_offset();
     // Flush any pending debug info before recording at this pc.
 497   flush_debug_info(pc_offset);
 498   info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
     // Register exception handlers covering this pc, if any.
 499   if (info->exception_handlers() != NULL) {
 500     compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
 501   }
 502 }
 503 
 504 void LIR_Assembler::return_op(LIR_Opr result) {
 505   assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0,");
 506 
 507   ciMethod* method = compilation()->method();
 508   // Pop the stack before the safepoint code
 509   __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
 510 
 511   if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
 512     __ reserved_stack_check();
 513   }
 514 
 515   address polling_page(os::get_polling_page());
 516   __ read_polling_page(rscratch1, polling_page, relocInfo::poll_return_type);
 517   __ ret(lr);
 518 }
 519 
     // Forward to the macro assembler: buffer the fields of a value object
     // of klass vk (used on the return path for value types).
 520 void LIR_Assembler::store_value_type_fields_to_buf(ciValueKlass* vk) { 
 521   __ store_value_type_fields_to_buf(vk);
 522 }
 523 
     // Emit a (non-return) safepoint poll: a load from the global polling
     // page. The CodeEmitInfo supplies the oop map that must be recorded at
     // the poll site. Returns the code offset after the poll instruction.
 524 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
 525   address polling_page(os::get_polling_page());
 526   guarantee(info != NULL, "Shouldn't be NULL");
 527   assert(os::is_poll_address(polling_page), "should be");
     // Load the polling-page address into rscratch1 ...
 528   __ get_polling_page(rscratch1, polling_page, relocInfo::poll_type);
 529   add_debug_info_for_branch(info);  // This isn't just debug info:
 530                                     // it's the oop map
     // ... then perform the actual poll load from it.
 531   __ read_polling_page(rscratch1, relocInfo::poll_type);
 532   return __ offset();
 533 }
 534 
 535 
 536 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
 537   if (from_reg == r31_sp)
 538     from_reg = sp;
 539   if (to_reg == r31_sp)
 540     to_reg = sp;
 541   __ mov(to_reg, from_reg);
 542 }
 543 


 551 
 552   switch (c->type()) {
 553     case T_INT: {
 554       assert(patch_code == lir_patch_none, "no patching handled here");
 555       __ movw(dest->as_register(), c->as_jint());
 556       break;
 557     }
 558 
 559     case T_ADDRESS: {
 560       assert(patch_code == lir_patch_none, "no patching handled here");
 561       __ mov(dest->as_register(), c->as_jint());
 562       break;
 563     }
 564 
 565     case T_LONG: {
 566       assert(patch_code == lir_patch_none, "no patching handled here");
 567       __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
 568       break;
 569     }
 570 
 571     case T_VALUETYPE:
 572     case T_OBJECT: {
 573         if (patch_code != lir_patch_none) {


 574           jobject2reg_with_patching(dest->as_register(), info);
 575         } else {
 576           jobject2reg(c->as_jobject(), dest->as_register());
 577         }
 578       break;
 579     }
 580 
 581     case T_METADATA: {
 582       if (patch_code != lir_patch_none) {
 583         klass2reg_with_patching(dest->as_register(), info);
 584       } else {
 585         __ mov_metadata(dest->as_register(), c->as_metadata());
 586       }
 587       break;
 588     }
 589 
 590     case T_FLOAT: {
 591       if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
 592         __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
 593       } else {
 594         __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
 595         __ ldrs(dest->as_float_reg(), Address(rscratch1));
 596       }


 598     }
 599 
 600     case T_DOUBLE: {
 601       if (__ operand_valid_for_float_immediate(c->as_jdouble())) {
 602         __ fmovd(dest->as_double_reg(), (c->as_jdouble()));
 603       } else {
 604         __ adr(rscratch1, InternalAddress(double_constant(c->as_jdouble())));
 605         __ ldrd(dest->as_double_reg(), Address(rscratch1));
 606       }
 607       break;
 608     }
 609 
 610     default:
 611       ShouldNotReachHere();
 612   }
 613 }
 614 
 615 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 616   LIR_Const* c = src->as_constant_ptr();
 617   switch (c->type()) {
 618   case T_VALUETYPE: 
 619   case T_OBJECT:
 620     {
 621       if (! c->as_jobject())
 622         __ str(zr, frame_map()->address_for_slot(dest->single_stack_ix()));
 623       else {
 624         const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 625         reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 626       }
 627     }
 628     break;
 629   case T_ADDRESS:
 630     {
 631       const2reg(src, FrameMap::rscratch1_opr, lir_patch_none, NULL);
 632       reg2stack(FrameMap::rscratch1_opr, dest, c->type(), false);
 633     }
 634   case T_INT:
 635   case T_FLOAT:
 636     {
 637       Register reg = zr;
 638       if (c->as_jint_bits() == 0)


     // Store a constant directly to memory. Only the zero constant of each
     // type is supported (asserted below); the store is emitted from zr,
     // with the store width chosen by the basic type via a member-function
     // pointer into the assembler.
 665 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 666   assert(src->is_constant(), "should not call otherwise");
 667   LIR_Const* c = src->as_constant_ptr();
 668   LIR_Address* to_addr = dest->as_address_ptr();
 669 
     // Store instruction to use, selected by type below.
 670   void (Assembler::* insn)(Register Rt, const Address &adr);
 671 
 672   switch (type) {
 673   case T_ADDRESS:
 674     assert(c->as_jint() == 0, "should be");
 675     insn = &Assembler::str;
 676     break;
 677   case T_LONG:
 678     assert(c->as_jlong() == 0, "should be");
 679     insn = &Assembler::str;
 680     break;
 681   case T_INT:
 682     assert(c->as_jint() == 0, "should be");
 683     insn = &Assembler::strw;
 684     break;
 685   case T_VALUETYPE: // DMS CHECK: the code is significantly differ from x86
 686   case T_OBJECT:
 687   case T_ARRAY:
 688     assert(c->as_jobject() == 0, "should be");
     // A null oop is a 32-bit zero when heap oops are compressed and the
     // store is narrow; otherwise a full 64-bit zero.
 689     if (UseCompressedOops && !wide) {
 690       insn = &Assembler::strw;
 691     } else {
 692       insn = &Assembler::str;
 693     }
 694     break;
 695   case T_CHAR:
 696   case T_SHORT:
 697     assert(c->as_jint() == 0, "should be");
 698     insn = &Assembler::strh;
 699     break;
 700   case T_BOOLEAN:
 701   case T_BYTE:
 702     assert(c->as_jint() == 0, "should be");
 703     insn = &Assembler::strb;
 704     break;
 705   default:
 706     ShouldNotReachHere();
 707     insn = &Assembler::str;  // unreachable
 708   }
 709 
     // The store itself may be the implicit null check for this site.
 710   if (info) add_debug_info_for_null_check_here(info);
 711   (_masm->*insn)(zr, as_Address(to_addr, rscratch1));
 712 }
 713 
     // Move a value between registers, dispatching on the destination kind
     // (single/double cpu register, single/double fpu register). Oop-typed
     // sources are verified before the move.
 714 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 715   assert(src->is_register(), "should not call otherwise");
 716   assert(dest->is_register(), "should not call otherwise");
 717 
 718   // move between cpu-registers
 719   if (dest->is_single_cpu()) {
 720     if (src->type() == T_LONG) {
 721       // Can do LONG -> OBJECT
 722       move_regs(src->as_register_lo(), dest->as_register());
 723       return;
 724     }
 725     assert(src->is_single_cpu(), "must match");
 726     if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) {
 727       __ verify_oop(src->as_register());
 728     }
 729     move_regs(src->as_register(), dest->as_register());
 730 
 731   } else if (dest->is_double_cpu()) {
 732     if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) {
 733       // Surprising to me but we can see move of a long to t_object
 734       __ verify_oop(src->as_register());
 735       move_regs(src->as_register(), dest->as_register_lo());
 736       return;
 737     }
 738     assert(src->is_double_cpu(), "must match");
 739     Register f_lo = src->as_register_lo();
 740     Register f_hi = src->as_register_hi();
 741     Register t_lo = dest->as_register_lo();
 742     Register t_hi = dest->as_register_hi();
     // On aarch64 a double-cpu operand occupies one 64-bit register, so the
     // hi and lo halves must name the same register.
 743     assert(f_hi == f_lo, "must be same");
 744     assert(t_hi == t_lo, "must be same");
 745     move_regs(f_lo, t_lo);
 746 
 747   } else if (dest->is_single_fpu()) {
 748     __ fmovs(dest->as_float_reg(), src->as_float_reg());
 749 
 750   } else if (dest->is_double_fpu()) {
 751     __ fmovd(dest->as_double_reg(), src->as_double_reg());
 752 
 753   } else {
 754     ShouldNotReachHere();
 755   }
 756 }
 757 
     // Spill a register to a stack slot. Store width is chosen by type:
     // full 64-bit str for oops/metadata, 32-bit strw for other word types,
     // strs/strd for float/double registers.
 758 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 759   if (src->is_single_cpu()) {
 760     if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
 761       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 762       __ verify_oop(src->as_register());
     // NOTE(review): T_DOUBLE appearing in the single-cpu branch is
     // surprising — presumably a double bit-pattern held in a gp register;
     // confirm against callers before changing.
 763     } else if (type == T_METADATA || type == T_DOUBLE) {
 764       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 765     } else {
 766       __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 767     }
 768 
 769   } else if (src->is_double_cpu()) {
     // Both halves live in one 64-bit register; one str covers the slot.
 770     Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 771     __ str(src->as_register_lo(), dest_addr_LO);
 772 
 773   } else if (src->is_single_fpu()) {
 774     Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 775     __ strs(src->as_float_reg(), dest_addr);
 776 
 777   } else if (src->is_double_fpu()) {
 778     Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 779     __ strd(src->as_double_reg(), dest_addr);
 780 
 781   } else {
 782     ShouldNotReachHere();
 783   }
 784 
 785 }
 786 
 787 
 788 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 789   LIR_Address* to_addr = dest->as_address_ptr();
 790   PatchingStub* patch = NULL;
 791   Register compressed_src = rscratch1;
 792 
 793   if (patch_code != lir_patch_none) {
 794     deoptimize_trap(info);
 795     return;
 796   }
 797 
 798   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
 799     __ verify_oop(src->as_register());
 800 
 801     if (UseCompressedOops && !wide) {
 802       __ encode_heap_oop(compressed_src, src->as_register());
 803     } else {
 804       compressed_src = src->as_register();
 805     }
 806   }
 807 
 808   int null_check_here = code_offset();
 809   switch (type) {
 810     case T_FLOAT: {
 811       __ strs(src->as_float_reg(), as_Address(to_addr));
 812       break;
 813     }
 814 
 815     case T_DOUBLE: {
 816       __ strd(src->as_double_reg(), as_Address(to_addr));
 817       break;
 818     }
 819 
 820     case T_VALUETYPE: // fall through
 821     case T_ARRAY:   // fall through
 822     case T_OBJECT:  // fall through
 823       if (UseCompressedOops && !wide) {
 824         __ strw(compressed_src, as_Address(to_addr, rscratch2));
 825       } else {
 826          __ str(compressed_src, as_Address(to_addr));
 827       }
 828       break;
 829     case T_METADATA:
 830       // We get here to store a method pointer to the stack to pass to
 831       // a dtrace runtime call. This can't work on 64 bit with
 832       // compressed klass ptrs: T_METADATA can be a compressed klass
 833       // ptr or a 64 bit method pointer.
 834       ShouldNotReachHere();
 835       __ str(src->as_register(), as_Address(to_addr));
 836       break;
 837     case T_ADDRESS:
 838       __ str(src->as_register(), as_Address(to_addr));
 839       break;
 840     case T_INT:


 854 
 855     case T_CHAR:    // fall through
 856     case T_SHORT:
 857       __ strh(src->as_register(), as_Address(to_addr));
 858       break;
 859 
 860     default:
 861       ShouldNotReachHere();
 862   }
 863   if (info != NULL) {
 864     add_debug_info_for_null_check(null_check_here, info);
 865   }
 866 }
 867 
 868 
 869 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 870   assert(src->is_stack(), "should not call otherwise");
 871   assert(dest->is_register(), "should not call otherwise");
 872 
 873   if (dest->is_single_cpu()) {
 874     if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
 875       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 876       __ verify_oop(dest->as_register());
 877     } else if (type == T_METADATA) {
 878       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 879     } else {
 880       __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 881     }
 882 
 883   } else if (dest->is_double_cpu()) {
 884     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
 885     __ ldr(dest->as_register_lo(), src_addr_LO);
 886 
 887   } else if (dest->is_single_fpu()) {
 888     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
 889     __ ldrs(dest->as_float_reg(), src_addr);
 890 
 891   } else if (dest->is_double_fpu()) {
 892     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
 893     __ ldrd(dest->as_double_reg(), src_addr);
 894 


 926   add_call_info_here(info);
 927 }
 928 
 929 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
 930 
 931   LIR_Opr temp;
 932   if (type == T_LONG || type == T_DOUBLE)
 933     temp = FrameMap::rscratch1_long_opr;
 934   else
 935     temp = FrameMap::rscratch1_opr;
 936 
 937   stack2reg(src, temp, src->type());
 938   reg2stack(temp, dest, dest->type(), false);
 939 }
 940 
 941 
 942 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
 943   LIR_Address* addr = src->as_address_ptr();
 944   LIR_Address* from_addr = src->as_address_ptr();
 945 
 946   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) { 
 947     __ verify_oop(addr->base()->as_pointer_register());
 948   }
 949 
 950   if (patch_code != lir_patch_none) {
 951     deoptimize_trap(info);
 952     return;
 953   }
 954 
 955   if (info != NULL) {
 956     add_debug_info_for_null_check_here(info);
 957   }
 958   int null_check_here = code_offset();
 959   switch (type) {
 960     case T_FLOAT: {
 961       __ ldrs(dest->as_float_reg(), as_Address(from_addr));
 962       break;
 963     }
 964 
 965     case T_DOUBLE: {
 966       __ ldrd(dest->as_double_reg(), as_Address(from_addr));
 967       break;
 968     }
 969 
 970     case T_VALUETYPE: // fall through
 971     case T_ARRAY:   // fall through
 972     case T_OBJECT:  // fall through
 973       if (UseCompressedOops && !wide) {
 974         __ ldrw(dest->as_register(), as_Address(from_addr));
 975       } else {
 976          __ ldr(dest->as_register(), as_Address(from_addr));
 977       }
 978       break;
 979     case T_METADATA:
 980       // We get here to store a method pointer to the stack to pass to
 981       // a dtrace runtime call. This can't work on 64 bit with
 982       // compressed klass ptrs: T_METADATA can be a compressed klass
 983       // ptr or a 64 bit method pointer.
 984       ShouldNotReachHere();
 985       __ ldr(dest->as_register(), as_Address(from_addr));
 986       break;
 987     case T_ADDRESS:
 988       // FIXME: OMG this is a horrible kludge.  Any offset from an
 989       // address that matches klass_offset_in_bytes() will be loaded
 990       // as a word, not a long.


1005 
1006     case T_BYTE:
1007       __ ldrsb(dest->as_register(), as_Address(from_addr));
1008       break;
1009     case T_BOOLEAN: {
1010       __ ldrb(dest->as_register(), as_Address(from_addr));
1011       break;
1012     }
1013 
1014     case T_CHAR:
1015       __ ldrh(dest->as_register(), as_Address(from_addr));
1016       break;
1017     case T_SHORT:
1018       __ ldrsh(dest->as_register(), as_Address(from_addr));
1019       break;
1020 
1021     default:
1022       ShouldNotReachHere();
1023   }
1024 
1025   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
1026     if (UseCompressedOops && !wide) {
1027       __ decode_heap_oop(dest->as_register());
1028     }
1029 
1030     if (!UseZGC) {
1031       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1032       __ verify_oop(dest->as_register());
1033     }
1034   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1035     if (UseCompressedClassPointers) {
1036       __ andr(dest->as_register(), dest->as_register(), oopDesc::compressed_klass_mask());
1037       __ decode_klass_not_null(dest->as_register());
1038     } else {
1039       __   ubfm(dest->as_register(), dest->as_register(), 0, 63 - oopDesc::storage_props_nof_bits);
1040     }
1041   }
1042 }
1043 
1044 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1045   assert(dst->is_cpu_register(), "must be");
1046   assert(dst->type() == src->type(), "must be");
1047 
1048   if (src->is_cpu_register()) {
1049     reg2reg(src, dst);
1050   } else if (src->is_stack()) {
1051     stack2reg(src, dst, dst->type());
1052   } else if (src->is_constant()) {
1053     const2reg(src, dst, lir_patch_none, NULL);
1054   } else {
1055     ShouldNotReachHere();
1056   }
1057 }
1058 
1059 int LIR_Assembler::array_element_size(BasicType type) const {
1060   int elem_size = type2aelembytes(type);
1061   return exact_log2(elem_size);
1062 }
1063 
1064 
1065 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1066   switch (op->code()) {
1067   case lir_idiv:
1068   case lir_irem:
1069     arithmetic_idiv(op->code(),
1070                     op->in_opr1(),
1071                     op->in_opr2(),
1072                     op->in_opr3(),
1073                     op->result_opr(),
1074                     op->info());
1075     break;
1076   case lir_fmad:
1077     __ fmaddd(op->result_opr()->as_double_reg(),


1229     __ ldrb(rscratch1, Address(op->klass()->as_register(),
1230                                InstanceKlass::init_state_offset()));
1231     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1232     add_debug_info_for_null_check_here(op->stub()->info());
1233     __ br(Assembler::NE, *op->stub()->entry());
1234   }
1235   __ allocate_object(op->obj()->as_register(),
1236                      op->tmp1()->as_register(),
1237                      op->tmp2()->as_register(),
1238                      op->header_size(),
1239                      op->object_size(),
1240                      op->klass()->as_register(),
1241                      *op->stub()->entry());
1242   __ bind(*op->stub()->continuation());
1243 }
1244 
1245 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1246   Register len =  op->len()->as_register();
1247   __ uxtw(len, len);
1248 
1249   if (UseSlowPath || op->type() == T_VALUETYPE ||
1250       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1251       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1252     __ b(*op->stub()->entry());
1253   } else {
1254     Register tmp1 = op->tmp1()->as_register();
1255     Register tmp2 = op->tmp2()->as_register();
1256     Register tmp3 = op->tmp3()->as_register();
1257     if (len == tmp1) {
1258       tmp1 = tmp3;
1259     } else if (len == tmp2) {
1260       tmp2 = tmp3;
1261     } else if (len == tmp3) {
1262       // everything is ok
1263     } else {
1264       __ mov(tmp3, len);
1265     }
1266     __ allocate_array(op->obj()->as_register(),
1267                       len,
1268                       tmp1,
1269                       tmp2,


1541     __ bind(success);
1542     if (dst != obj) {
1543       __ mov(dst, obj);
1544     }
1545   } else if (code == lir_instanceof) {
1546     Register obj = op->object()->as_register();
1547     Register dst = op->result_opr()->as_register();
1548     Label success, failure, done;
1549     emit_typecheck_helper(op, &success, &failure, &failure);
1550     __ bind(failure);
1551     __ mov(dst, zr);
1552     __ b(done);
1553     __ bind(success);
1554     __ mov(dst, 1);
1555     __ bind(done);
1556   } else {
1557     ShouldNotReachHere();
1558   }
1559 }
1560 
1561 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1562   // We are loading/storing an array that *may* be a flattened array (the declared type
1563   // Object[], interface[], or VT?[]). If this array is flattened, take slow path.
1564 
     // Read the array's storage properties into the temp register, then
     // test the flattened bit: NE (bit set) means flattened -> slow path.
1565   __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
1566   __ tst(op->tmp()->as_register(), ArrayStorageProperties::flattened_value);
1567   __ br(Assembler::NE, *op->stub()->entry());
1568   if (!op->value()->is_illegal()) {
1569     // We are storing into the array.
1570     Label skip;
     // EQ here means the null_free bit is clear -> nulls are allowed, skip.
1571     __ tst(op->tmp()->as_register(), ArrayStorageProperties::null_free_value);
1572     __ br(Assembler::EQ, skip);
1573     // The array is not flattened, but it is null_free. If we are storing
1574     // a null, take the slow path (which will throw NPE).
1575     __ cbz(op->value()->as_register(), *op->stub()->entry());
1576     __ bind(skip);
1577   }
1578 
1579 }
1580 
1581 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1582   // This is called when we use aastore into an array declared as "[LVT;",
1583   // where we know VT is not flattenable (due to ValueArrayElemMaxFlatOops, etc).
1584   // However, we need to do a NULL check if the actual array is a "[QVT;".
1585 
     // NOTE(review): this emits only the compare — condition flags are left
     // for the consumer of this LIR op to branch on (EQ == null-free);
     // confirm against the LIR op's users.
1586   __ load_storage_props(op->tmp()->as_register(), op->array()->as_register());
1587   __ mov(rscratch1, (uint64_t) ArrayStorageProperties::null_free_value);
1588   __ cmp(op->tmp()->as_register(), rscratch1);
1589 }
1590 
     // Substitutability check for two references (Valhalla acmp semantics):
     // equal references are trivially substitutable; otherwise null-ness,
     // value-object-ness (via the mark word), and klass equality are tested
     // inline, and only the expensive field-by-field comparison is deferred
     // to the slow-path stub.
1591 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1592   Label L_oops_equal;
1593   Label L_oops_not_equal;
1594   Label L_end;
1595 
1596   Register left  = op->left()->as_register();
1597   Register right = op->right()->as_register();
1598 
     // Fast path: identical references are always substitutable.
1599   __ cmp(left, right);
1600   __ br(Assembler::EQ, L_oops_equal);
1601 
1602   // (1) Null check -- if one of the operands is null, the other must not be null (because
1603   //     the two references are not equal), so they are not substitutable,
1604   //     FIXME: do null check only if the operand is nullable
1605   {
1606     __ cbz(left, L_oops_not_equal);
1607     __ cbz(right, L_oops_not_equal);
1608   }
1609 
1610 
1611   ciKlass* left_klass = op->left_klass();
1612   ciKlass* right_klass = op->right_klass();
1613 
1614   // (2) Value object check -- if either of the operands is not a value object,
1615   //     they are not substitutable. We do this only if we are not sure that the
1616   //     operands are value objects
1617   if ((left_klass == NULL || right_klass == NULL) ||// The klass is still unloaded, or came from a Phi node.
1618       !left_klass->is_valuetype() || !right_klass->is_valuetype()) {
1619     Register tmp1  = rscratch1; /* op->tmp1()->as_register(); */
1620     Register tmp2  = rscratch2; /* op->tmp2()->as_register(); */
1621 
     // AND the always_locked mark-word pattern across both operands' mark
     // words; the pattern survives only if both operands carry it, i.e.
     // both are value objects.
1622     __ mov(tmp1, (intptr_t)markOopDesc::always_locked_pattern);
1623 
1624     __ ldr(tmp2, Address(left, oopDesc::mark_offset_in_bytes()));
1625     __ andr(tmp1, tmp1, tmp2);
1626 
1627     __ ldr(tmp2, Address(right, oopDesc::mark_offset_in_bytes()));
1628     __ andr(tmp1, tmp1, tmp2); 
1629 
1630     __ mov(tmp2, (intptr_t)markOopDesc::always_locked_pattern);
1631     __ cmp(tmp1, tmp2); 
1632     __ br(Assembler::NE, L_oops_not_equal);
1633   }
1634 
1635   // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1636   if (left_klass != NULL && left_klass->is_valuetype() && left_klass == right_klass) {
1637     // No need to load klass -- the operands are statically known to be the same value klass.
1638     __ b(*op->stub()->entry());
1639   } else {
1640     Register left_klass_op = op->left_klass_op()->as_register();
1641     Register right_klass_op = op->right_klass_op()->as_register();
1642 
1643     // DMS CHECK, likely x86 bug, make aarch64 implementation correct
1644     __ load_klass(left_klass_op, left);
1645     __ load_klass(right_klass_op, right);
1646     __ cmp(left_klass_op, right_klass_op);
1647     __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1648     // fall through to L_oops_not_equal
1649   }
1650 
1651   __ bind(L_oops_not_equal);
1652   move(op->not_equal_result(), op->result_opr());
1653   __ b(L_end);
1654 
1655   __ bind(L_oops_equal);
1656   move(op->equal_result(), op->result_opr());
1657   __ b(L_end);
1658 
1659   // We've returned from the stub. op->result_opr() contains 0x0 IFF the two
1660   // operands are not substitutable. (Don't compare against 0x1 in case the
1661   // C compiler is naughty)
1662   __ bind(*op->stub()->continuation());
1663 
     // Word-sized test for T_LONG results, full test otherwise.
1664   if (op->result_opr()->type() == T_LONG) {
1665     __ cbzw(op->result_opr()->as_register(), L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1666   } else {
1667     __ cbz(op->result_opr()->as_register(), L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1668   }
1669 
1670   move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
1671   // fall-through
1672   __ bind(L_end);
1673 
1674 }
1675 
1676 
// Emit a 32-bit compare-and-swap: if *addr == cmpval, store newval at addr.
// On exit rscratch1 is 1 if the CAS failed and 0 if it succeeded (cmpxchg is
// expected to leave the flags with EQ set on success; cset materializes NE).
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  // Full two-way barrier: keep surrounding memory accesses from being
  // reordered across the CAS.
  __ membar(__ AnyAny);
}
1682 
// Emit a 64-bit (xword) compare-and-swap: if *addr == cmpval, store newval at
// addr. On exit rscratch1 is 1 if the CAS failed and 0 if it succeeded
// (cmpxchg is expected to set EQ on success; cset materializes NE).
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
  __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
  __ cset(rscratch1, Assembler::NE);
  // Full two-way barrier: keep surrounding memory accesses from being
  // reordered across the CAS.
  __ membar(__ AnyAny);
}
1688 
1689 
1690 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1691   assert(VM_Version::supports_cx8(), "wrong machine");
1692   Register addr;
1693   if (op->addr()->is_register()) {
1694     addr = as_reg(op->addr());
1695   } else {
1696     assert(op->addr()->is_address(), "what else?");


2067       }
2068     }
2069   } else {
2070     Register rreg = right->as_register();
2071     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
2072   }
2073 }
2074 
2075 
2076 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2077   if (opr1->is_constant() && opr2->is_single_cpu()) {
2078     // tableswitch
2079     Register reg = as_reg(opr2);
2080     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
2081     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
2082   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
2083     Register reg1 = as_reg(opr1);
2084     if (opr2->is_single_cpu()) {
2085       // cpu register - cpu register
2086       Register reg2 = opr2->as_register();
2087       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
2088         __ cmpoop(reg1, reg2);
2089       } else {
2090         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE,  "cmp int, oop?");
2091         __ cmpw(reg1, reg2);
2092       }
2093       return;
2094     }
2095     if (opr2->is_double_cpu()) {
2096       // cpu register - cpu register
2097       Register reg2 = opr2->as_register_lo();
2098       __ cmp(reg1, reg2);
2099       return;
2100     }
2101 
2102     if (opr2->is_constant()) {
2103       bool is_32bit = false; // width of register operand
2104       jlong imm;
2105 
2106       switch(opr2->type()) {
2107       case T_INT:
2108         imm = opr2->as_constant_ptr()->as_jint();
2109         is_32bit = true;
2110         break;
2111       case T_LONG:
2112         imm = opr2->as_constant_ptr()->as_jlong();
2113         break;
2114       case T_ADDRESS:
2115         imm = opr2->as_constant_ptr()->as_jint();
2116         break;
2117       case T_VALUETYPE:
2118       case T_OBJECT:
2119       case T_ARRAY:
2120         jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
2121         __ cmpoop(reg1, rscratch1);
2122         return;
2123       default:
2124         ShouldNotReachHere();
2125         imm = 0;  // unreachable
2126         break;
2127       }
2128 
2129       if (Assembler::operand_valid_for_add_sub_immediate(imm)) {
2130         if (is_32bit)
2131           __ cmpw(reg1, imm);
2132         else
2133           __ subs(zr, reg1, imm);
2134         return;
2135       } else {
2136         __ mov(rscratch1, imm);
2137         if (is_32bit)


2264   __ b(_unwind_handler_entry);
2265 }
2266 
2267 
2268 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
2269   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2270   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2271 
2272   switch (left->type()) {
2273     case T_INT: {
2274       switch (code) {
2275       case lir_shl:  __ lslvw (dreg, lreg, count->as_register()); break;
2276       case lir_shr:  __ asrvw (dreg, lreg, count->as_register()); break;
2277       case lir_ushr: __ lsrvw (dreg, lreg, count->as_register()); break;
2278       default:
2279         ShouldNotReachHere();
2280         break;
2281       }
2282       break;
2283     case T_LONG:
2284     case T_VALUETYPE: 
2285     case T_ADDRESS:
2286     case T_OBJECT:
2287       switch (code) {
2288       case lir_shl:  __ lslv (dreg, lreg, count->as_register()); break;
2289       case lir_shr:  __ asrv (dreg, lreg, count->as_register()); break;
2290       case lir_ushr: __ lsrv (dreg, lreg, count->as_register()); break;
2291       default:
2292         ShouldNotReachHere();
2293         break;
2294       }
2295       break;
2296     default:
2297       ShouldNotReachHere();
2298       break;
2299     }
2300   }
2301 }
2302 
2303 
2304 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
2305   Register dreg = dest->is_single_cpu() ? dest->as_register() : dest->as_register_lo();
2306   Register lreg = left->is_single_cpu() ? left->as_register() : left->as_register_lo();
2307 
2308   switch (left->type()) {
2309     case T_INT: {
2310       switch (code) {
2311       case lir_shl:  __ lslw (dreg, lreg, count); break;
2312       case lir_shr:  __ asrw (dreg, lreg, count); break;
2313       case lir_ushr: __ lsrw (dreg, lreg, count); break;
2314       default:
2315         ShouldNotReachHere();
2316         break;
2317       }
2318       break;
2319     case T_LONG:
2320     case T_ADDRESS:
2321     case T_VALUETYPE:
2322     case T_OBJECT:
2323       switch (code) {
2324       case lir_shl:  __ lsl (dreg, lreg, count); break;
2325       case lir_shr:  __ asr (dreg, lreg, count); break;
2326       case lir_ushr: __ lsr (dreg, lreg, count); break;
2327       default:
2328         ShouldNotReachHere();
2329         break;
2330       }
2331       break;
2332     default:
2333       ShouldNotReachHere();
2334       break;
2335     }
2336   }
2337 }
2338 
2339 
2340 void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
2341   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");


2346 
2347 
2348 void LIR_Assembler::store_parameter(jint c,     int offset_from_rsp_in_words) {
2349   assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2350   int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2351   assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2352   __ mov (rscratch1, c);
2353   __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2354 }
2355 
2356 
// Spill an oop constant into the reserved outgoing-argument area.
// Not expected to be used on aarch64: the guard below stops execution before
// any code is emitted. NOTE(review): the code after the guard documents the
// intended layout should this path ever be enabled -- confirm before relying
// on it.
void LIR_Assembler::store_parameter(jobject o,  int offset_from_rsp_in_words) {
  ShouldNotReachHere();
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  // Load the address of the embedded oop constant, then store it to the stack.
  __ lea(rscratch1, __ constant_oop_address(o));
  __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
}
2365 
2366 void LIR_Assembler::arraycopy_valuetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest) {
2367   __ load_storage_props(tmp, obj);
2368   if (is_dest) {
2369     // We also take slow path if it's a null_free destination array, just in case the source array
2370     // contains NULLs.
2371     __ tst(tmp, ArrayStorageProperties::flattened_value | ArrayStorageProperties::null_free_value);
2372   } else {
2373     __ tst(tmp, ArrayStorageProperties::flattened_value);
2374   }
2375   __ br(Assembler::NE, *slow_path->entry());
2376 }
2377 
2378 
2379 
2380 // This code replaces a call to arraycopy; no exceptions may
2381 // be thrown in this code; they must be thrown in the System.arraycopy
2382 // activation frame. We could save some checks if this were not the case.
2383 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2384   ciArrayKlass* default_type = op->expected_type();
2385   Register src = op->src()->as_register();
2386   Register dst = op->dst()->as_register();
2387   Register src_pos = op->src_pos()->as_register();
2388   Register dst_pos = op->dst_pos()->as_register();
2389   Register length  = op->length()->as_register();
2390   Register tmp = op->tmp()->as_register();
2391 
2392   __ resolve(ACCESS_READ, src);
2393   __ resolve(ACCESS_WRITE, dst);
2394 
2395   CodeStub* stub = op->stub();
2396   int flags = op->flags();
2397   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2398   if (basic_type == T_ARRAY || basic_type == T_VALUETYPE) basic_type = T_OBJECT;
2399 
2400   if (flags & LIR_OpArrayCopy::always_slow_path) {
2401     __ b(*stub->entry());
2402     __ bind(*stub->continuation());
2403     return;
2404   }
2405 
2406   if (flags & LIR_OpArrayCopy::src_valuetype_check) {
2407     arraycopy_valuetype_check(src, tmp, stub, false);
2408   }
2409 
2410   if (flags & LIR_OpArrayCopy::dst_valuetype_check) {
2411     arraycopy_valuetype_check(dst, tmp, stub, true);
2412   }
2413 
2414 
2415 
2416   // if we don't know anything, just go through the generic arraycopy
2417   if (default_type == NULL // || basic_type == T_OBJECT
2418       ) {
2419     Label done;
2420     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2421 
2422     // Save the arguments in case the generic arraycopy fails and we
2423     // have to fall back to the JNI stub
2424     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2425     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2426     __ str(src,              Address(sp, 4*BytesPerWord));
2427 
2428     address copyfunc_addr = StubRoutines::generic_arraycopy();
2429     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2430 
2431     // The arguments are in java calling convention so we shift them
2432     // to C convention
2433     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2434     __ mov(c_rarg0, j_rarg0);


3063 
3064 
3065 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3066   assert(!tmp->is_valid(), "don't need temporary");
3067 
3068   CodeBlob *cb = CodeCache::find_blob(dest);
3069   if (cb) {
3070     __ far_call(RuntimeAddress(dest));
3071   } else {
3072     __ mov(rscratch1, RuntimeAddress(dest));
3073     int len = args->length();
3074     int type = 0;
3075     if (! result->is_illegal()) {
3076       switch (result->type()) {
3077       case T_VOID:
3078         type = 0;
3079         break;
3080       case T_INT:
3081       case T_LONG:
3082       case T_OBJECT:
3083       case T_VALUETYPE:
3084         type = 1;
3085         break;
3086       case T_FLOAT:
3087         type = 2;
3088         break;
3089       case T_DOUBLE:
3090         type = 3;
3091         break;
3092       default:
3093         ShouldNotReachHere();
3094         break;
3095       }
3096     }
3097     int num_gpargs = 0;
3098     int num_fpargs = 0;
3099     for (int i = 0; i < args->length(); i++) {
3100       LIR_Opr arg = args->at(i);
3101       if (arg->type() == T_FLOAT || arg->type() == T_DOUBLE) {
3102         num_fpargs++;
3103       } else {


3330 #endif
3331 }
3332 
3333 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3334   Address addr = as_Address(src->as_address_ptr());
3335   BasicType type = src->type();
3336   bool is_oop = type == T_OBJECT || type == T_ARRAY;
3337 
3338   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3339   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3340 
3341   switch(type) {
3342   case T_INT:
3343     xchg = &MacroAssembler::atomic_xchgalw;
3344     add = &MacroAssembler::atomic_addalw;
3345     break;
3346   case T_LONG:
3347     xchg = &MacroAssembler::atomic_xchgal;
3348     add = &MacroAssembler::atomic_addal;
3349     break;
3350   case T_VALUETYPE:
3351   case T_OBJECT:
3352   case T_ARRAY:
3353     if (UseCompressedOops) {
3354       xchg = &MacroAssembler::atomic_xchgalw;
3355       add = &MacroAssembler::atomic_addalw;
3356     } else {
3357       xchg = &MacroAssembler::atomic_xchgal;
3358       add = &MacroAssembler::atomic_addal;
3359     }
3360     break;
3361   default:
3362     ShouldNotReachHere();
3363     xchg = &MacroAssembler::atomic_xchgal;
3364     add = &MacroAssembler::atomic_addal; // unreachable
3365   }
3366 
3367   switch (code) {
3368   case lir_xadd:
3369     {
3370       RegisterOrConstant inc;


< prev index next >