< prev index next >

src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

Print this page


 183 }
 184 
 185 void LIR_Assembler::ffree(int i) {
     // Release x87 FPU stack slot st(i) by emitting the FFREE instruction.
 186   __ ffree(i);
 187 }
 188 
 189 void LIR_Assembler::breakpoint() {
     // Emit an INT3 software breakpoint instruction.
 190   __ int3();
 191 }
 192 
     // Push the value of `opr` onto the machine stack. Handles single/double
     // CPU registers, stack slots, and T_OBJECT / T_INT constants; any other
     // operand kind is a caller error (ShouldNotReachHere).
 193 void LIR_Assembler::push(LIR_Opr opr) {
 194   if (opr->is_single_cpu()) {
 195     __ push_reg(opr->as_register());
 196   } else if (opr->is_double_cpu()) {
     // On 32-bit only, push the high word first so the low word ends up at
     // the lower address; on 64-bit the low register holds the whole value.
 197     NOT_LP64(__ push_reg(opr->as_register_hi()));
 198     __ push_reg(opr->as_register_lo());
 199   } else if (opr->is_stack()) {
 200     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 201   } else if (opr->is_constant()) {
 202     LIR_Const* const_opr = opr->as_constant_ptr();
 203     if (const_opr->type() == T_OBJECT) {
 204       __ push_oop(const_opr->as_jobject());
 205     } else if (const_opr->type() == T_INT) {
 206       __ push_jint(const_opr->as_jint());
 207     } else {
 208       ShouldNotReachHere();
 209     }
 210 
 211   } else {
 212     ShouldNotReachHere();
 213   }
 214 }
 215 
     // Pop the top of the machine stack into `opr`. Only single CPU
     // registers are supported here.
 216 void LIR_Assembler::pop(LIR_Opr opr) {
 217   if (opr->is_single_cpu()) {
 218     __ pop_reg(opr->as_register());
 219   } else {
 220     ShouldNotReachHere();
 221   }
 222 }
 223 


 613       break;
 614     }
 615 
 616     case T_ADDRESS: {
 617       assert(patch_code == lir_patch_none, "no patching handled here");
 618       __ movptr(dest->as_register(), c->as_jint());
 619       break;
 620     }
 621 
 622     case T_LONG: {
 623       assert(patch_code == lir_patch_none, "no patching handled here");
 624 #ifdef _LP64
 625       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 626 #else
 627       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 628       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 629 #endif // _LP64
 630       break;
 631     }
 632 

 633     case T_OBJECT: {
 634       if (patch_code != lir_patch_none) {
 635         jobject2reg_with_patching(dest->as_register(), info);
 636       } else {
 637         __ movoop(dest->as_register(), c->as_jobject());
 638       }
 639       break;
 640     }
 641 
 642     case T_METADATA: {
 643       if (patch_code != lir_patch_none) {
 644         klass2reg_with_patching(dest->as_register(), info);
 645       } else {
 646         __ mov_metadata(dest->as_register(), c->as_metadata());
 647       }
 648       break;
 649     }
 650 
 651     case T_FLOAT: {
 652       if (dest->is_single_xmm()) {


 695     default:
 696       ShouldNotReachHere();
 697   }
 698 }
 699 
     // Store the constant `src` into the stack slot `dest`. 32-bit payloads
     // (T_INT/T_FLOAT) use movl; pointer-sized ones use movptr; oops go
     // through movoop so the GC can find/patch them.
 700 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 701   assert(src->is_constant(), "should not call otherwise");
 702   assert(dest->is_stack(), "should not call otherwise");
 703   LIR_Const* c = src->as_constant_ptr();
 704 
 705   switch (c->type()) {
 706     case T_INT:  // fall through
 707     case T_FLOAT:
 708       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 709       break;
 710 
 711     case T_ADDRESS:
 712       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 713       break;
 714 
 715     case T_OBJECT:
 716       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
 717       break;
 718 
 719     case T_LONG:  // fall through
 720     case T_DOUBLE:
     // 64-bit: one pointer-sized store of the whole 64-bit constant.
 721 #ifdef _LP64
 722       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 723                                             lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
 724 #else
     // 32-bit: store low and high halves into the two word slots.
 725       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 726                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 727       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 728                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 729 #endif // _LP64
 730       break;
 731 
 732     default:
 733       ShouldNotReachHere();
 734   }
 735 }
 736 
 737 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 738   assert(src->is_constant(), "should not call otherwise");
 739   assert(dest->is_address(), "should not call otherwise");
 740   LIR_Const* c = src->as_constant_ptr();
 741   LIR_Address* addr = dest->as_address_ptr();
 742 
 743   int null_check_here = code_offset();
 744   switch (type) {
 745     case T_INT:    // fall through
 746     case T_FLOAT:
 747       __ movl(as_Address(addr), c->as_jint_bits());
 748       break;
 749 
 750     case T_ADDRESS:
 751       __ movptr(as_Address(addr), c->as_jint_bits());
 752       break;
 753 

 754     case T_OBJECT:  // fall through
 755     case T_ARRAY:
 756       if (c->as_jobject() == NULL) {
 757         if (UseCompressedOops && !wide) {
 758           __ movl(as_Address(addr), (int32_t)NULL_WORD);
 759         } else {
 760 #ifdef _LP64
 761           __ xorptr(rscratch1, rscratch1);
 762           null_check_here = code_offset();
 763           __ movptr(as_Address(addr), rscratch1);
 764 #else
 765           __ movptr(as_Address(addr), NULL_WORD);
 766 #endif
 767         }
 768       } else {
 769         if (is_literal_address(addr)) {
 770           ShouldNotReachHere();
 771           __ movoop(as_Address(addr, noreg), c->as_jobject());
 772         } else {
 773 #ifdef _LP64


 822   if (info != NULL) {
 823     add_debug_info_for_null_check(null_check_here, info);
 824   }
 825 }
 826 
 827 
 828 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 829   assert(src->is_register(), "should not call otherwise");
 830   assert(dest->is_register(), "should not call otherwise");
 831 
 832   // move between cpu-registers
 833   if (dest->is_single_cpu()) {
 834 #ifdef _LP64
 835     if (src->type() == T_LONG) {
 836       // Can do LONG -> OBJECT
 837       move_regs(src->as_register_lo(), dest->as_register());
 838       return;
 839     }
 840 #endif
 841     assert(src->is_single_cpu(), "must match");
 842     if (src->type() == T_OBJECT) {
 843       __ verify_oop(src->as_register());
 844     }
 845     move_regs(src->as_register(), dest->as_register());
 846 
 847   } else if (dest->is_double_cpu()) {
 848 #ifdef _LP64
 849     if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
 850       // Surprising to me but we can see move of a long to t_object
 851       __ verify_oop(src->as_register());
 852       move_regs(src->as_register(), dest->as_register_lo());
 853       return;
 854     }
 855 #endif
 856     assert(src->is_double_cpu(), "must match");
 857     Register f_lo = src->as_register_lo();
 858     Register f_hi = src->as_register_hi();
 859     Register t_lo = dest->as_register_lo();
 860     Register t_hi = dest->as_register_hi();
 861 #ifdef _LP64
 862     assert(f_hi == f_lo, "must be same");
 863     assert(t_hi == t_lo, "must be same");
 864     move_regs(f_lo, t_lo);
 865 #else
 866     assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
 867 
 868 
 869     if (f_lo == t_hi && f_hi == t_lo) {


 900     __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
 901   } else if (dest->is_double_xmm()) {
 902     assert(src->is_double_xmm(), "must match");
 903     __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
 904 
 905     // move between fpu-registers (no instruction necessary because of fpu-stack)
 906   } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
 907     assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
 908     assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
 909   } else {
 910     ShouldNotReachHere();
 911   }
 912 }
 913 
 914 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 915   assert(src->is_register(), "should not call otherwise");
 916   assert(dest->is_stack(), "should not call otherwise");
 917 
 918   if (src->is_single_cpu()) {
 919     Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
 920     if (type == T_OBJECT || type == T_ARRAY) {
 921       __ verify_oop(src->as_register());
 922       __ movptr (dst, src->as_register());
 923     } else if (type == T_METADATA) {
 924       __ movptr (dst, src->as_register());
 925     } else {
 926       __ movl (dst, src->as_register());
 927     }
 928 
 929   } else if (src->is_double_cpu()) {
 930     Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 931     Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
 932     __ movptr (dstLO, src->as_register_lo());
 933     NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
 934 
 935   } else if (src->is_single_xmm()) {
 936     Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 937     __ movflt(dst_addr, src->as_xmm_float_reg());
 938 
 939   } else if (src->is_double_xmm()) {
 940     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());


 946     if (pop_fpu_stack)     __ fstp_s (dst_addr);
 947     else                   __ fst_s  (dst_addr);
 948 
 949   } else if (src->is_double_fpu()) {
 950     assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
 951     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 952     if (pop_fpu_stack)     __ fstp_d (dst_addr);
 953     else                   __ fst_d  (dst_addr);
 954 
 955   } else {
 956     ShouldNotReachHere();
 957   }
 958 }
 959 
 960 
 961 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 962   LIR_Address* to_addr = dest->as_address_ptr();
 963   PatchingStub* patch = NULL;
 964   Register compressed_src = rscratch1;
 965 
 966   if (type == T_ARRAY || type == T_OBJECT) {
 967     __ verify_oop(src->as_register());
 968 #ifdef _LP64
 969     if (UseCompressedOops && !wide) {
 970       __ movptr(compressed_src, src->as_register());
 971       __ encode_heap_oop(compressed_src);
 972       if (patch_code != lir_patch_none) {
 973         info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
 974       }
 975     }
 976 #endif
 977   }
 978 
 979   if (patch_code != lir_patch_none) {
 980     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
 981     Address toa = as_Address(to_addr);
 982     assert(toa.disp() != 0, "must have");
 983   }
 984 
 985   int null_check_here = code_offset();
 986   switch (type) {


 991         assert(src->is_single_fpu(), "must be");
 992         assert(src->fpu_regnr() == 0, "argument must be on TOS");
 993         if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
 994         else                    __ fst_s (as_Address(to_addr));
 995       }
 996       break;
 997     }
 998 
 999     case T_DOUBLE: {
1000       if (src->is_double_xmm()) {
1001         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1002       } else {
1003         assert(src->is_double_fpu(), "must be");
1004         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1005         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1006         else                    __ fst_d (as_Address(to_addr));
1007       }
1008       break;
1009     }
1010 

1011     case T_ARRAY:   // fall through
1012     case T_OBJECT:  // fall through
1013       if (UseCompressedOops && !wide) {
1014         __ movl(as_Address(to_addr), compressed_src);
1015       } else {
1016         __ movptr(as_Address(to_addr), src->as_register());
1017       }
1018       break;
1019     case T_METADATA:
1020       // We get here to store a method pointer to the stack to pass to
1021       // a dtrace runtime call. This can't work on 64 bit with
1022       // compressed klass ptrs: T_METADATA can be a compressed klass
1023       // ptr or a 64 bit method pointer.
1024       LP64_ONLY(ShouldNotReachHere());
1025       __ movptr(as_Address(to_addr), src->as_register());
1026       break;
1027     case T_ADDRESS:
1028       __ movptr(as_Address(to_addr), src->as_register());
1029       break;
1030     case T_INT:


1081       break;
1082 
1083     default:
1084       ShouldNotReachHere();
1085   }
1086   if (info != NULL) {
1087     add_debug_info_for_null_check(null_check_here, info);
1088   }
1089 
1090   if (patch_code != lir_patch_none) {
1091     patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1092   }
1093 }
1094 
1095 
     // Load the stack slot `src` into the register operand `dest`, sized by
     // `type`. Covers general registers (single and double word), XMM float
     // registers, and the x87 FPU stack (dest must already be TOS).
1096 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1097   assert(src->is_stack(), "should not call otherwise");
1098   assert(dest->is_register(), "should not call otherwise");
1099 
1100   if (dest->is_single_cpu()) {
1101     if (type == T_ARRAY || type == T_OBJECT) {
1102       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
     // Loaded value is an oop — verify it in debug builds.
1103       __ verify_oop(dest->as_register());
1104     } else if (type == T_METADATA) {
1105       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1106     } else {
1107       __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1108     }
1109 
1110   } else if (dest->is_double_cpu()) {
1111     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1112     Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1113     __ movptr(dest->as_register_lo(), src_addr_LO);
     // High word only exists as a separate register on 32-bit.
1114     NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1115 
1116   } else if (dest->is_single_xmm()) {
1117     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1118     __ movflt(dest->as_xmm_float_reg(), src_addr);
1119 
1120   } else if (dest->is_double_xmm()) {
1121     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1122     __ movdbl(dest->as_xmm_double_reg(), src_addr);
1123 
1124   } else if (dest->is_single_fpu()) {
1125     assert(dest->fpu_regnr() == 0, "dest must be TOS");
1126     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1127     __ fld_s(src_addr);
1128 
1129   } else if (dest->is_double_fpu()) {
1130     assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1131     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1132     __ fld_d(src_addr);
1133 
1134   } else {
1135     ShouldNotReachHere();
1136   }
1137 }
1138 
1139 
     // Copy one stack slot to another, sized by `type`. Uses push/pop pairs
     // where available; on 64-bit, single-word copies go through rscratch1
     // because there is no 32-bit push.
1140 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1141   if (src->is_single_stack()) {
1142     if (type == T_OBJECT || type == T_ARRAY) {
1143       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1144       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1145     } else {
1146 #ifndef _LP64
1147       __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1148       __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1149 #else
1150       //no pushl on 64bits
1151       __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1152       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1153 #endif
1154     }
1155 
1156   } else if (src->is_double_stack()) {
1157 #ifdef _LP64
1158     __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1159     __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1160 #else
     // 32-bit: copy the two words with nested push/push/pop/pop. The second
     // push reads src + wordSize, but the first push moved rsp by one word,
     // hence the 2 * wordSize offset.
1161     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1162     // push and pop the part at src + wordSize, adding wordSize for the previous push
1163     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1164     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1165     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1166 #endif // _LP64
1167 
1168   } else {
1169     ShouldNotReachHere();
1170   }
1171 }
1172 
1173 
1174 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
1175   assert(src->is_address(), "should not call otherwise");
1176   assert(dest->is_register(), "should not call otherwise");
1177 
1178   LIR_Address* addr = src->as_address_ptr();
1179   Address from_addr = as_Address(addr);
1180 
1181   if (addr->base()->type() == T_OBJECT) {
1182     __ verify_oop(addr->base()->as_pointer_register());
1183   }
1184 
1185   switch (type) {
1186     case T_BOOLEAN: // fall through
1187     case T_BYTE:    // fall through
1188     case T_CHAR:    // fall through
1189     case T_SHORT:
1190       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1191         // on pre P6 processors we may get partial register stalls
1192         // so blow away the value of to_rinfo before loading a
1193         // partial word into it.  Do it here so that it precedes
1194         // the potential patch point below.
1195         __ xorptr(dest->as_register(), dest->as_register());
1196       }
1197       break;
1198    default:
1199      break;
1200   }
1201 


1214         __ movflt(dest->as_xmm_float_reg(), from_addr);
1215       } else {
1216         assert(dest->is_single_fpu(), "must be");
1217         assert(dest->fpu_regnr() == 0, "dest must be TOS");
1218         __ fld_s(from_addr);
1219       }
1220       break;
1221     }
1222 
1223     case T_DOUBLE: {
1224       if (dest->is_double_xmm()) {
1225         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1226       } else {
1227         assert(dest->is_double_fpu(), "must be");
1228         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1229         __ fld_d(from_addr);
1230       }
1231       break;
1232     }
1233 

1234     case T_OBJECT:  // fall through
1235     case T_ARRAY:   // fall through
1236       if (UseCompressedOops && !wide) {
1237         __ movl(dest->as_register(), from_addr);
1238       } else {
1239         __ movptr(dest->as_register(), from_addr);
1240       }
1241       break;
1242 
1243     case T_ADDRESS:
1244       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1245         __ movl(dest->as_register(), from_addr);
1246       } else {
1247         __ movptr(dest->as_register(), from_addr);
1248       }
1249       break;
1250     case T_INT:
1251       __ movl(dest->as_register(), from_addr);
1252       break;
1253 


1323     case T_SHORT: {
1324       Register dest_reg = dest->as_register();
1325       if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1326         __ movswl(dest_reg, from_addr);
1327       } else {
1328         __ movw(dest_reg, from_addr);
1329         __ shll(dest_reg, 16);
1330         __ sarl(dest_reg, 16);
1331       }
1332       break;
1333     }
1334 
1335     default:
1336       ShouldNotReachHere();
1337   }
1338 
1339   if (patch != NULL) {
1340     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1341   }
1342 
1343   if (type == T_ARRAY || type == T_OBJECT) {
1344 #ifdef _LP64
1345     if (UseCompressedOops && !wide) {
1346       __ decode_heap_oop(dest->as_register());
1347     }
1348 #endif
1349 
1350     // Load barrier has not yet been applied, so ZGC can't verify the oop here
1351     if (!UseZGC) {
1352       __ verify_oop(dest->as_register());
1353     }
1354   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1355 #ifdef _LP64
1356     if (UseCompressedClassPointers) {
1357       __ decode_klass_not_null(dest->as_register());
1358     }
1359 #endif
1360   }
1361 }
1362 
1363 


1560     add_debug_info_for_null_check_here(op->stub()->info());
1561     __ cmpb(Address(op->klass()->as_register(),
1562                     InstanceKlass::init_state_offset()),
1563                     InstanceKlass::fully_initialized);
1564     __ jcc(Assembler::notEqual, *op->stub()->entry());
1565   }
1566   __ allocate_object(op->obj()->as_register(),
1567                      op->tmp1()->as_register(),
1568                      op->tmp2()->as_register(),
1569                      op->header_size(),
1570                      op->object_size(),
1571                      op->klass()->as_register(),
1572                      *op->stub()->entry());
1573   __ bind(*op->stub()->continuation());
1574 }
1575 
1576 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1577   Register len =  op->len()->as_register();
1578   LP64_ONLY( __ movslq(len, len); )
1579 
1580   if (UseSlowPath ||
1581       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1582       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1583     __ jmp(*op->stub()->entry());
1584   } else {
1585     Register tmp1 = op->tmp1()->as_register();
1586     Register tmp2 = op->tmp2()->as_register();
1587     Register tmp3 = op->tmp3()->as_register();
1588     if (len == tmp1) {
1589       tmp1 = tmp3;
1590     } else if (len == tmp2) {
1591       tmp2 = tmp3;
1592     } else if (len == tmp3) {
1593       // everything is ok
1594     } else {
1595       __ mov(tmp3, len);
1596     }
1597     __ allocate_array(op->obj()->as_register(),
1598                       len,
1599                       tmp1,
1600                       tmp2,


2481       int r_hi = right->as_constant_ptr()->as_jint_hi();
2482       switch (code) {
2483         case lir_logic_and:
2484           __ andl(l_lo, r_lo);
2485           __ andl(l_hi, r_hi);
2486           break;
2487         case lir_logic_or:
2488           __ orl(l_lo, r_lo);
2489           __ orl(l_hi, r_hi);
2490           break;
2491         case lir_logic_xor:
2492           __ xorl(l_lo, r_lo);
2493           __ xorl(l_hi, r_hi);
2494           break;
2495         default: ShouldNotReachHere();
2496       }
2497 #endif // _LP64
2498     } else {
2499 #ifdef _LP64
2500       Register r_lo;
2501       if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
2502         r_lo = right->as_register();
2503       } else {
2504         r_lo = right->as_register_lo();
2505       }
2506 #else
2507       Register r_lo = right->as_register_lo();
2508       Register r_hi = right->as_register_hi();
2509       assert(l_lo != r_hi, "overwriting registers");
2510 #endif
2511       switch (code) {
2512         case lir_logic_and:
2513           __ andptr(l_lo, r_lo);
2514           NOT_LP64(__ andptr(l_hi, r_hi);)
2515           break;
2516         case lir_logic_or:
2517           __ orptr(l_lo, r_lo);
2518           NOT_LP64(__ orptr(l_hi, r_hi);)
2519           break;
2520         case lir_logic_xor:
2521           __ xorptr(l_lo, r_lo);


2594     move_regs(lreg, rax);
2595 
2596     int idivl_offset = __ corrected_idivl(rreg);
2597     if (ImplicitDiv0Checks) {
2598       add_debug_info_for_div0(idivl_offset, info);
2599     }
2600     if (code == lir_irem) {
2601       move_regs(rdx, dreg); // result is in rdx
2602     } else {
2603       move_regs(rax, dreg);
2604     }
2605   }
2606 }
2607 
2608 
2609 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2610   if (opr1->is_single_cpu()) {
2611     Register reg1 = opr1->as_register();
2612     if (opr2->is_single_cpu()) {
2613       // cpu register - cpu register
2614       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2615         __ cmpoop(reg1, opr2->as_register());
2616       } else {
2617         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
2618         __ cmpl(reg1, opr2->as_register());
2619       }
2620     } else if (opr2->is_stack()) {
2621       // cpu register - stack
2622       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2623         __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2624       } else {
2625         __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2626       }
2627     } else if (opr2->is_constant()) {
2628       // cpu register - constant
2629       LIR_Const* c = opr2->as_constant_ptr();
2630       if (c->type() == T_INT) {
2631         __ cmpl(reg1, c->as_jint());
2632       } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2633         // In 64bit oops are single register
2634         jobject o = c->as_jobject();
2635         if (o == NULL) {
2636           __ cmpptr(reg1, (int32_t)NULL_WORD);
2637         } else {
2638           __ cmpoop(reg1, o);
2639         }
2640       } else {
2641         fatal("unexpected type: %s", basictype_to_str(c->type()));
2642       }
2643       // cpu register - address
2644     } else if (opr2->is_address()) {
2645       if (op->info() != NULL) {
2646         add_debug_info_for_null_check_here(op->info());
2647       }
2648       __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2649     } else {
2650       ShouldNotReachHere();
2651     }
2652 


2712       // xmm register - constant
2713       __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2714     } else if (opr2->is_address()) {
2715       // xmm register - address
2716       if (op->info() != NULL) {
2717         add_debug_info_for_null_check_here(op->info());
2718       }
2719       __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2720     } else {
2721       ShouldNotReachHere();
2722     }
2723 
2724   } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2725     assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2726     assert(opr2->is_fpu_register(), "both must be registers");
2727     __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2728 
2729   } else if (opr1->is_address() && opr2->is_constant()) {
2730     LIR_Const* c = opr2->as_constant_ptr();
2731 #ifdef _LP64
2732     if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2733       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2734       __ movoop(rscratch1, c->as_jobject());
2735     }
2736 #endif // LP64
2737     if (op->info() != NULL) {
2738       add_debug_info_for_null_check_here(op->info());
2739     }
2740     // special case: address - constant
2741     LIR_Address* addr = opr1->as_address_ptr();
2742     if (c->type() == T_INT) {
2743       __ cmpl(as_Address(addr), c->as_jint());
2744     } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2745 #ifdef _LP64
2746       // %%% Make this explode if addr isn't reachable until we figure out a
2747       // better strategy by giving noreg as the temp for as_Address
2748       __ cmpoop(rscratch1, as_Address(addr, noreg));
2749 #else
2750       __ cmpoop(as_Address(addr), c->as_jobject());
2751 #endif // _LP64
2752     } else {
2753       ShouldNotReachHere();
2754     }
2755 
2756   } else {
2757     ShouldNotReachHere();
2758   }
2759 }
2760 
2761 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2762   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2763     if (left->is_single_xmm()) {
2764       assert(right->is_single_xmm(), "must match");




 183 }
 184 
 185 void LIR_Assembler::ffree(int i) {
     // Release x87 FPU stack slot st(i) by emitting the FFREE instruction.
 186   __ ffree(i);
 187 }
 188 
 189 void LIR_Assembler::breakpoint() {
     // Emit an INT3 software breakpoint instruction.
 190   __ int3();
 191 }
 192 
     // Push the value of `opr` onto the machine stack. Handles single/double
     // CPU registers, stack slots, and constants; T_VALUETYPE constants are
     // treated like T_OBJECT (both are pushed as oops).
 193 void LIR_Assembler::push(LIR_Opr opr) {
 194   if (opr->is_single_cpu()) {
 195     __ push_reg(opr->as_register());
 196   } else if (opr->is_double_cpu()) {
     // On 32-bit only, push the high word first; on 64-bit the low register
     // holds the whole value.
 197     NOT_LP64(__ push_reg(opr->as_register_hi()));
 198     __ push_reg(opr->as_register_lo());
 199   } else if (opr->is_stack()) {
 200     __ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
 201   } else if (opr->is_constant()) {
 202     LIR_Const* const_opr = opr->as_constant_ptr();
 203     if (const_opr->type() == T_OBJECT || const_opr->type() == T_VALUETYPE) {
 204       __ push_oop(const_opr->as_jobject());
 205     } else if (const_opr->type() == T_INT) {
 206       __ push_jint(const_opr->as_jint());
 207     } else {
 208       ShouldNotReachHere();
 209     }
 210 
 211   } else {
 212     ShouldNotReachHere();
 213   }
 214 }
 215 
     // Pop the top of the machine stack into `opr`. Only single CPU
     // registers are supported here.
 216 void LIR_Assembler::pop(LIR_Opr opr) {
 217   if (opr->is_single_cpu()) {
 218     __ pop_reg(opr->as_register());
 219   } else {
 220     ShouldNotReachHere();
 221   }
 222 }
 223 


 613       break;
 614     }
 615 
 616     case T_ADDRESS: {
 617       assert(patch_code == lir_patch_none, "no patching handled here");
 618       __ movptr(dest->as_register(), c->as_jint());
 619       break;
 620     }
 621 
 622     case T_LONG: {
 623       assert(patch_code == lir_patch_none, "no patching handled here");
 624 #ifdef _LP64
 625       __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
 626 #else
 627       __ movptr(dest->as_register_lo(), c->as_jint_lo());
 628       __ movptr(dest->as_register_hi(), c->as_jint_hi());
 629 #endif // _LP64
 630       break;
 631     }
 632 
 633     case T_VALUETYPE: // Fall through
 634     case T_OBJECT: {
 635       if (patch_code != lir_patch_none) {
 636         jobject2reg_with_patching(dest->as_register(), info);
 637       } else {
 638         __ movoop(dest->as_register(), c->as_jobject());
 639       }
 640       break;
 641     }
 642 
 643     case T_METADATA: {
 644       if (patch_code != lir_patch_none) {
 645         klass2reg_with_patching(dest->as_register(), info);
 646       } else {
 647         __ mov_metadata(dest->as_register(), c->as_metadata());
 648       }
 649       break;
 650     }
 651 
 652     case T_FLOAT: {
 653       if (dest->is_single_xmm()) {


 696     default:
 697       ShouldNotReachHere();
 698   }
 699 }
 700 
     // Store the constant `src` into the stack slot `dest`. 32-bit payloads
     // (T_INT/T_FLOAT) use movl; pointer-sized ones use movptr; T_VALUETYPE
     // falls through to T_OBJECT — both are stored as oops via movoop.
 701 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 702   assert(src->is_constant(), "should not call otherwise");
 703   assert(dest->is_stack(), "should not call otherwise");
 704   LIR_Const* c = src->as_constant_ptr();
 705 
 706   switch (c->type()) {
 707     case T_INT:  // fall through
 708     case T_FLOAT:
 709       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 710       break;
 711 
 712     case T_ADDRESS:
 713       __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
 714       break;
 715 
 716     case T_VALUETYPE: // Fall through
 717     case T_OBJECT:
 718       __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
 719       break;
 720 
 721     case T_LONG:  // fall through
 722     case T_DOUBLE:
     // 64-bit: one pointer-sized store of the whole 64-bit constant.
 723 #ifdef _LP64
 724       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 725                                             lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
 726 #else
     // 32-bit: store low and high halves into the two word slots.
 727       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 728                                               lo_word_offset_in_bytes), c->as_jint_lo_bits());
 729       __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
 730                                               hi_word_offset_in_bytes), c->as_jint_hi_bits());
 731 #endif // _LP64
 732       break;
 733 
 734     default:
 735       ShouldNotReachHere();
 736   }
 737 }
 738 
 739 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
 740   assert(src->is_constant(), "should not call otherwise");
 741   assert(dest->is_address(), "should not call otherwise");
 742   LIR_Const* c = src->as_constant_ptr();
 743   LIR_Address* addr = dest->as_address_ptr();
 744 
 745   int null_check_here = code_offset();
 746   switch (type) {
 747     case T_INT:    // fall through
 748     case T_FLOAT:
 749       __ movl(as_Address(addr), c->as_jint_bits());
 750       break;
 751 
 752     case T_ADDRESS:
 753       __ movptr(as_Address(addr), c->as_jint_bits());
 754       break;
 755 
 756     case T_VALUETYPE: // fall through
 757     case T_OBJECT:  // fall through
 758     case T_ARRAY:
 759       if (c->as_jobject() == NULL) {
 760         if (UseCompressedOops && !wide) {
 761           __ movl(as_Address(addr), (int32_t)NULL_WORD);
 762         } else {
 763 #ifdef _LP64
 764           __ xorptr(rscratch1, rscratch1);
 765           null_check_here = code_offset();
 766           __ movptr(as_Address(addr), rscratch1);
 767 #else
 768           __ movptr(as_Address(addr), NULL_WORD);
 769 #endif
 770         }
 771       } else {
 772         if (is_literal_address(addr)) {
 773           ShouldNotReachHere();
 774           __ movoop(as_Address(addr, noreg), c->as_jobject());
 775         } else {
 776 #ifdef _LP64


 825   if (info != NULL) {
 826     add_debug_info_for_null_check(null_check_here, info);
 827   }
 828 }
 829 
 830 
 831 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 832   assert(src->is_register(), "should not call otherwise");
 833   assert(dest->is_register(), "should not call otherwise");
 834 
 835   // move between cpu-registers
 836   if (dest->is_single_cpu()) {
 837 #ifdef _LP64
 838     if (src->type() == T_LONG) {
 839       // Can do LONG -> OBJECT
 840       move_regs(src->as_register_lo(), dest->as_register());
 841       return;
 842     }
 843 #endif
 844     assert(src->is_single_cpu(), "must match");
 845     if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) {
 846       __ verify_oop(src->as_register());
 847     }
 848     move_regs(src->as_register(), dest->as_register());
 849 
 850   } else if (dest->is_double_cpu()) {
 851 #ifdef _LP64
 852     if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) {
 853       // Surprising to me but we can see move of a long to t_object
 854       __ verify_oop(src->as_register());
 855       move_regs(src->as_register(), dest->as_register_lo());
 856       return;
 857     }
 858 #endif
 859     assert(src->is_double_cpu(), "must match");
 860     Register f_lo = src->as_register_lo();
 861     Register f_hi = src->as_register_hi();
 862     Register t_lo = dest->as_register_lo();
 863     Register t_hi = dest->as_register_hi();
 864 #ifdef _LP64
 865     assert(f_hi == f_lo, "must be same");
 866     assert(t_hi == t_lo, "must be same");
 867     move_regs(f_lo, t_lo);
 868 #else
 869     assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
 870 
 871 
 872     if (f_lo == t_hi && f_hi == t_lo) {


 903     __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
 904   } else if (dest->is_double_xmm()) {
 905     assert(src->is_double_xmm(), "must match");
 906     __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
 907 
 908     // move between fpu-registers (no instruction necessary because of fpu-stack)
 909   } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
 910     assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
 911     assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
 912   } else {
 913     ShouldNotReachHere();
 914   }
 915 }
 916 
 917 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 918   assert(src->is_register(), "should not call otherwise");
 919   assert(dest->is_stack(), "should not call otherwise");
 920 
 921   if (src->is_single_cpu()) {
 922     Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
 923     if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
 924       __ verify_oop(src->as_register());
 925       __ movptr (dst, src->as_register());
 926     } else if (type == T_METADATA) {
 927       __ movptr (dst, src->as_register());
 928     } else {
 929       __ movl (dst, src->as_register());
 930     }
 931 
 932   } else if (src->is_double_cpu()) {
 933     Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 934     Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
 935     __ movptr (dstLO, src->as_register_lo());
 936     NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
 937 
 938   } else if (src->is_single_xmm()) {
 939     Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 940     __ movflt(dst_addr, src->as_xmm_float_reg());
 941 
 942   } else if (src->is_double_xmm()) {
 943     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());


 949     if (pop_fpu_stack)     __ fstp_s (dst_addr);
 950     else                   __ fst_s  (dst_addr);
 951 
 952   } else if (src->is_double_fpu()) {
 953     assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
 954     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 955     if (pop_fpu_stack)     __ fstp_d (dst_addr);
 956     else                   __ fst_d  (dst_addr);
 957 
 958   } else {
 959     ShouldNotReachHere();
 960   }
 961 }
 962 
 963 
 964 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 965   LIR_Address* to_addr = dest->as_address_ptr();
 966   PatchingStub* patch = NULL;
 967   Register compressed_src = rscratch1;
 968 
 969   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
 970     __ verify_oop(src->as_register());
 971 #ifdef _LP64
 972     if (UseCompressedOops && !wide) {
 973       __ movptr(compressed_src, src->as_register());
 974       __ encode_heap_oop(compressed_src);
 975       if (patch_code != lir_patch_none) {
 976         info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
 977       }
 978     }
 979 #endif
 980   }
 981 
 982   if (patch_code != lir_patch_none) {
 983     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
 984     Address toa = as_Address(to_addr);
 985     assert(toa.disp() != 0, "must have");
 986   }
 987 
 988   int null_check_here = code_offset();
 989   switch (type) {


 994         assert(src->is_single_fpu(), "must be");
 995         assert(src->fpu_regnr() == 0, "argument must be on TOS");
 996         if (pop_fpu_stack)      __ fstp_s(as_Address(to_addr));
 997         else                    __ fst_s (as_Address(to_addr));
 998       }
 999       break;
1000     }
1001 
1002     case T_DOUBLE: {
1003       if (src->is_double_xmm()) {
1004         __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1005       } else {
1006         assert(src->is_double_fpu(), "must be");
1007         assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1008         if (pop_fpu_stack)      __ fstp_d(as_Address(to_addr));
1009         else                    __ fst_d (as_Address(to_addr));
1010       }
1011       break;
1012     }
1013 
1014     case T_VALUETYPE: // fall through
1015     case T_ARRAY:   // fall through
1016     case T_OBJECT:  // fall through
1017       if (UseCompressedOops && !wide) {
1018         __ movl(as_Address(to_addr), compressed_src);
1019       } else {
1020         __ movptr(as_Address(to_addr), src->as_register());
1021       }
1022       break;
1023     case T_METADATA:
1024       // We get here to store a method pointer to the stack to pass to
1025       // a dtrace runtime call. This can't work on 64 bit with
1026       // compressed klass ptrs: T_METADATA can be a compressed klass
1027       // ptr or a 64 bit method pointer.
1028       LP64_ONLY(ShouldNotReachHere());
1029       __ movptr(as_Address(to_addr), src->as_register());
1030       break;
1031     case T_ADDRESS:
1032       __ movptr(as_Address(to_addr), src->as_register());
1033       break;
1034     case T_INT:


1085       break;
1086 
1087     default:
1088       ShouldNotReachHere();
1089   }
1090   if (info != NULL) {
1091     add_debug_info_for_null_check(null_check_here, info);
1092   }
1093 
1094   if (patch_code != lir_patch_none) {
1095     patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1096   }
1097 }
1098 
1099 
1100 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1101   assert(src->is_stack(), "should not call otherwise");
1102   assert(dest->is_register(), "should not call otherwise");
1103 
1104   if (dest->is_single_cpu()) {
1105     if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
1106       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1107       __ verify_oop(dest->as_register());
1108     } else if (type == T_METADATA) {
1109       __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1110     } else {
1111       __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1112     }
1113 
1114   } else if (dest->is_double_cpu()) {
1115     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1116     Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1117     __ movptr(dest->as_register_lo(), src_addr_LO);
1118     NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1119 
1120   } else if (dest->is_single_xmm()) {
1121     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1122     __ movflt(dest->as_xmm_float_reg(), src_addr);
1123 
1124   } else if (dest->is_double_xmm()) {
1125     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1126     __ movdbl(dest->as_xmm_double_reg(), src_addr);
1127 
1128   } else if (dest->is_single_fpu()) {
1129     assert(dest->fpu_regnr() == 0, "dest must be TOS");
1130     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1131     __ fld_s(src_addr);
1132 
1133   } else if (dest->is_double_fpu()) {
1134     assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1135     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1136     __ fld_d(src_addr);
1137 
1138   } else {
1139     ShouldNotReachHere();
1140   }
1141 }
1142 
1143 
1144 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1145   if (src->is_single_stack()) {
1146     if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
1147       __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1148       __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1149     } else {
1150 #ifndef _LP64
1151       __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1152       __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1153 #else
1154       //no pushl on 64bits
1155       __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1156       __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1157 #endif
1158     }
1159 
1160   } else if (src->is_double_stack()) {
1161 #ifdef _LP64
1162     __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1163     __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1164 #else
1165     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1166     // push and pop the part at src + wordSize, adding wordSize for the previous push
1167     __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1168     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1169     __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1170 #endif // _LP64
1171 
1172   } else {
1173     ShouldNotReachHere();
1174   }
1175 }
1176 
1177 
1178 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
1179   assert(src->is_address(), "should not call otherwise");
1180   assert(dest->is_register(), "should not call otherwise");
1181 
1182   LIR_Address* addr = src->as_address_ptr();
1183   Address from_addr = as_Address(addr);
1184 
1185   if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) {
1186     __ verify_oop(addr->base()->as_pointer_register());
1187   }
1188 
1189   switch (type) {
1190     case T_BOOLEAN: // fall through
1191     case T_BYTE:    // fall through
1192     case T_CHAR:    // fall through
1193     case T_SHORT:
1194       if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1195         // on pre P6 processors we may get partial register stalls
1196         // so blow away the value of to_rinfo before loading a
1197         // partial word into it.  Do it here so that it precedes
1198         // the potential patch point below.
1199         __ xorptr(dest->as_register(), dest->as_register());
1200       }
1201       break;
1202    default:
1203      break;
1204   }
1205 


1218         __ movflt(dest->as_xmm_float_reg(), from_addr);
1219       } else {
1220         assert(dest->is_single_fpu(), "must be");
1221         assert(dest->fpu_regnr() == 0, "dest must be TOS");
1222         __ fld_s(from_addr);
1223       }
1224       break;
1225     }
1226 
1227     case T_DOUBLE: {
1228       if (dest->is_double_xmm()) {
1229         __ movdbl(dest->as_xmm_double_reg(), from_addr);
1230       } else {
1231         assert(dest->is_double_fpu(), "must be");
1232         assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1233         __ fld_d(from_addr);
1234       }
1235       break;
1236     }
1237 
1238     case T_VALUETYPE: // fall through
1239     case T_OBJECT:  // fall through
1240     case T_ARRAY:   // fall through
1241       if (UseCompressedOops && !wide) {
1242         __ movl(dest->as_register(), from_addr);
1243       } else {
1244         __ movptr(dest->as_register(), from_addr);
1245       }
1246       break;
1247 
1248     case T_ADDRESS:
1249       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1250         __ movl(dest->as_register(), from_addr);
1251       } else {
1252         __ movptr(dest->as_register(), from_addr);
1253       }
1254       break;
1255     case T_INT:
1256       __ movl(dest->as_register(), from_addr);
1257       break;
1258 


1328     case T_SHORT: {
1329       Register dest_reg = dest->as_register();
1330       if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1331         __ movswl(dest_reg, from_addr);
1332       } else {
1333         __ movw(dest_reg, from_addr);
1334         __ shll(dest_reg, 16);
1335         __ sarl(dest_reg, 16);
1336       }
1337       break;
1338     }
1339 
1340     default:
1341       ShouldNotReachHere();
1342   }
1343 
1344   if (patch != NULL) {
1345     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1346   }
1347 
1348   if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
1349 #ifdef _LP64
1350     if (UseCompressedOops && !wide) {
1351       __ decode_heap_oop(dest->as_register());
1352     }
1353 #endif
1354 
1355     // Load barrier has not yet been applied, so ZGC can't verify the oop here
1356     if (!UseZGC) {
1357       __ verify_oop(dest->as_register());
1358     }
1359   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1360 #ifdef _LP64
1361     if (UseCompressedClassPointers) {
1362       __ decode_klass_not_null(dest->as_register());
1363     }
1364 #endif
1365   }
1366 }
1367 
1368 


1565     add_debug_info_for_null_check_here(op->stub()->info());
1566     __ cmpb(Address(op->klass()->as_register(),
1567                     InstanceKlass::init_state_offset()),
1568                     InstanceKlass::fully_initialized);
1569     __ jcc(Assembler::notEqual, *op->stub()->entry());
1570   }
1571   __ allocate_object(op->obj()->as_register(),
1572                      op->tmp1()->as_register(),
1573                      op->tmp2()->as_register(),
1574                      op->header_size(),
1575                      op->object_size(),
1576                      op->klass()->as_register(),
1577                      *op->stub()->entry());
1578   __ bind(*op->stub()->continuation());
1579 }
1580 
1581 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1582   Register len =  op->len()->as_register();
1583   LP64_ONLY( __ movslq(len, len); )
1584 
1585   if (UseSlowPath || op->type() == T_VALUETYPE ||
1586       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1587       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1588     __ jmp(*op->stub()->entry());
1589   } else {
1590     Register tmp1 = op->tmp1()->as_register();
1591     Register tmp2 = op->tmp2()->as_register();
1592     Register tmp3 = op->tmp3()->as_register();
1593     if (len == tmp1) {
1594       tmp1 = tmp3;
1595     } else if (len == tmp2) {
1596       tmp2 = tmp3;
1597     } else if (len == tmp3) {
1598       // everything is ok
1599     } else {
1600       __ mov(tmp3, len);
1601     }
1602     __ allocate_array(op->obj()->as_register(),
1603                       len,
1604                       tmp1,
1605                       tmp2,


2486       int r_hi = right->as_constant_ptr()->as_jint_hi();
2487       switch (code) {
2488         case lir_logic_and:
2489           __ andl(l_lo, r_lo);
2490           __ andl(l_hi, r_hi);
2491           break;
2492         case lir_logic_or:
2493           __ orl(l_lo, r_lo);
2494           __ orl(l_hi, r_hi);
2495           break;
2496         case lir_logic_xor:
2497           __ xorl(l_lo, r_lo);
2498           __ xorl(l_hi, r_hi);
2499           break;
2500         default: ShouldNotReachHere();
2501       }
2502 #endif // _LP64
2503     } else {
2504 #ifdef _LP64
2505       Register r_lo;
2506       if (right->type() == T_OBJECT || right->type() == T_ARRAY || right->type() == T_VALUETYPE) {
2507         r_lo = right->as_register();
2508       } else {
2509         r_lo = right->as_register_lo();
2510       }
2511 #else
2512       Register r_lo = right->as_register_lo();
2513       Register r_hi = right->as_register_hi();
2514       assert(l_lo != r_hi, "overwriting registers");
2515 #endif
2516       switch (code) {
2517         case lir_logic_and:
2518           __ andptr(l_lo, r_lo);
2519           NOT_LP64(__ andptr(l_hi, r_hi);)
2520           break;
2521         case lir_logic_or:
2522           __ orptr(l_lo, r_lo);
2523           NOT_LP64(__ orptr(l_hi, r_hi);)
2524           break;
2525         case lir_logic_xor:
2526           __ xorptr(l_lo, r_lo);


2599     move_regs(lreg, rax);
2600 
2601     int idivl_offset = __ corrected_idivl(rreg);
2602     if (ImplicitDiv0Checks) {
2603       add_debug_info_for_div0(idivl_offset, info);
2604     }
2605     if (code == lir_irem) {
2606       move_regs(rdx, dreg); // result is in rdx
2607     } else {
2608       move_regs(rax, dreg);
2609     }
2610   }
2611 }
2612 
2613 
2614 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2615   if (opr1->is_single_cpu()) {
2616     Register reg1 = opr1->as_register();
2617     if (opr2->is_single_cpu()) {
2618       // cpu register - cpu register
2619       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
2620         __ cmpoop(reg1, opr2->as_register());
2621       } else {
2622         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE, "cmp int, oop?");
2623         __ cmpl(reg1, opr2->as_register());
2624       }
2625     } else if (opr2->is_stack()) {
2626       // cpu register - stack
2627       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
2628         __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2629       } else {
2630         __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2631       }
2632     } else if (opr2->is_constant()) {
2633       // cpu register - constant
2634       LIR_Const* c = opr2->as_constant_ptr();
2635       if (c->type() == T_INT) {
2636         __ cmpl(reg1, c->as_jint());
2637       } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
2638         // In 64bit oops are single register
2639         jobject o = c->as_jobject();
2640         if (o == NULL) {
2641           __ cmpptr(reg1, (int32_t)NULL_WORD);
2642         } else {
2643           __ cmpoop(reg1, o);
2644         }
2645       } else {
2646         fatal("unexpected type: %s", basictype_to_str(c->type()));
2647       }
2648       // cpu register - address
2649     } else if (opr2->is_address()) {
2650       if (op->info() != NULL) {
2651         add_debug_info_for_null_check_here(op->info());
2652       }
2653       __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2654     } else {
2655       ShouldNotReachHere();
2656     }
2657 


2717       // xmm register - constant
2718       __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2719     } else if (opr2->is_address()) {
2720       // xmm register - address
2721       if (op->info() != NULL) {
2722         add_debug_info_for_null_check_here(op->info());
2723       }
2724       __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2725     } else {
2726       ShouldNotReachHere();
2727     }
2728 
2729   } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2730     assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2731     assert(opr2->is_fpu_register(), "both must be registers");
2732     __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2733 
2734   } else if (opr1->is_address() && opr2->is_constant()) {
2735     LIR_Const* c = opr2->as_constant_ptr();
2736 #ifdef _LP64
2737     if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
2738       assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2739       __ movoop(rscratch1, c->as_jobject());
2740     }
2741 #endif // LP64
2742     if (op->info() != NULL) {
2743       add_debug_info_for_null_check_here(op->info());
2744     }
2745     // special case: address - constant
2746     LIR_Address* addr = opr1->as_address_ptr();
2747     if (c->type() == T_INT) {
2748       __ cmpl(as_Address(addr), c->as_jint());
2749     } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
2750 #ifdef _LP64
2751       // %%% Make this explode if addr isn't reachable until we figure out a
2752       // better strategy by giving noreg as the temp for as_Address
2753       __ cmpoop(rscratch1, as_Address(addr, noreg));
2754 #else
2755       __ cmpoop(as_Address(addr), c->as_jobject());
2756 #endif // _LP64
2757     } else {
2758       ShouldNotReachHere();
2759     }
2760 
2761   } else {
2762     ShouldNotReachHere();
2763   }
2764 }
2765 
2766 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2767   if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2768     if (left->is_single_xmm()) {
2769       assert(right->is_single_xmm(), "must match");


< prev index next >