314 __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
315 __ jcc(Assembler::notZero, L);
316 __ stop("locked object is NULL");
317 __ bind(L);
318 }
319 #endif
320 __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
321 __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
322 __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
323 __ movptr(frame_map()->address_for_monitor_object(i), rbx);
324 }
325 }
326 }
327
328
329 // inline cache check; done before the frame is built.
330 int LIR_Assembler::check_icache() {
331 Register receiver = FrameMap::receiver_opr->as_register();
332 Register ic_klass = IC_Klass;
333 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
334
335 if (!VerifyOops) {
336 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
337 while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
338 __ nop();
339 }
340 }
341 int offset = __ offset();
342 __ inline_cache_check(receiver, IC_Klass);
343 assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct");
344 if (VerifyOops) {
345 // force alignment after the cache check.
346 // It's been verified to be aligned if !VerifyOops
347 __ align(CodeEntryAlignment);
348 }
349 return offset;
350 }
351
352
353 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
354 jobject o = NULL;
355 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
356 __ movoop(reg, o);
357 patching_epilog(patch, lir_patch_normal, reg, info);
358 }
359
360
361 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) {
362 if (exception->is_valid()) {
363 // preserve exception
364 // note: the monitor_exit runtime call is a leaf routine
530 int offset = code_offset();
531 InternalAddress here(__ pc());
532
533 __ pushptr(here.addr());
534 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
535
536 assert(code_offset() - offset <= deopt_handler_size, "overflow");
537 __ end_a_stub();
538
539 return offset;
540 }
541
542
543 // This is the fast version of java.lang.String.compare; it has no
544 // OSR entry and therefore we generate a slow version for OSRs
545 void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
546 __ movptr (rbx, rcx); // receiver is in rcx
547 __ movptr (rax, arg1->as_register());
548
549 // Get addresses of first characters from both Strings
550 __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
551 __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
552 __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
553
554
555     // rbx may be NULL
556 add_debug_info_for_null_check_here(info);
557 __ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
558 __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
559 __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
560
561 // compute minimum length (in rax) and difference of lengths (on top of stack)
562 if (VM_Version::supports_cmov()) {
563 __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
564 __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
565 __ mov (rcx, rbx);
566 __ subptr (rbx, rax); // subtract lengths
567 __ push (rbx); // result
568 __ cmov (Assembler::lessEqual, rax, rcx);
569 } else {
570 Label L;
571 __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
572 __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes()));
573 __ mov (rax, rbx);
574 __ subptr (rbx, rcx);
575 __ push (rbx);
576 __ jcc (Assembler::lessEqual, L);
577 __ mov (rax, rcx);
667 __ test32(rax, polling_page);
668 return offset;
669 }
670
671
672 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
673 if (from_reg != to_reg) __ mov(to_reg, from_reg);
674 }
675
676 void LIR_Assembler::swap_reg(Register a, Register b) {
677 __ xchgptr(a, b);
678 }
679
680
681 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
682 assert(src->is_constant(), "should not call otherwise");
683 assert(dest->is_register(), "should not call otherwise");
684 LIR_Const* c = src->as_constant_ptr();
685
686 switch (c->type()) {
687 case T_INT:
688 case T_ADDRESS: {
689 assert(patch_code == lir_patch_none, "no patching handled here");
690 __ movl(dest->as_register(), c->as_jint());
691 break;
692 }
693
694 case T_LONG: {
695 assert(patch_code == lir_patch_none, "no patching handled here");
696 #ifdef _LP64
697 __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
698 #else
699 __ movptr(dest->as_register_lo(), c->as_jint_lo());
700 __ movptr(dest->as_register_hi(), c->as_jint_hi());
701 #endif // _LP64
702 break;
703 }
704
705 case T_OBJECT: {
706 if (patch_code != lir_patch_none) {
707 jobject2reg_with_patching(dest->as_register(), info);
708 } else {
709 __ movoop(dest->as_register(), c->as_jobject());
710 }
711 break;
712 }
713
751 } else {
752 __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
753 }
754 }
755 break;
756 }
757
758 default:
759 ShouldNotReachHere();
760 }
761 }
762
763 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
764 assert(src->is_constant(), "should not call otherwise");
765 assert(dest->is_stack(), "should not call otherwise");
766 LIR_Const* c = src->as_constant_ptr();
767
768 switch (c->type()) {
769 case T_INT: // fall through
770 case T_FLOAT:
771 case T_ADDRESS:
772 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
773 break;
774
775 case T_OBJECT:
776 __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
777 break;
778
779 case T_LONG: // fall through
780 case T_DOUBLE:
781 #ifdef _LP64
782 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
783 lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
784 #else
785 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
786 lo_word_offset_in_bytes), c->as_jint_lo_bits());
787 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
788 hi_word_offset_in_bytes), c->as_jint_hi_bits());
789 #endif // _LP64
790 break;
791
792 default:
793 ShouldNotReachHere();
794 }
795 }
796
797 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
798 assert(src->is_constant(), "should not call otherwise");
799 assert(dest->is_address(), "should not call otherwise");
800 LIR_Const* c = src->as_constant_ptr();
801 LIR_Address* addr = dest->as_address_ptr();
802
803 int null_check_here = code_offset();
804 switch (type) {
805 case T_INT: // fall through
806 case T_FLOAT:
807 case T_ADDRESS:
808 __ movl(as_Address(addr), c->as_jint_bits());
809 break;
810
811 case T_OBJECT: // fall through
812 case T_ARRAY:
813 if (c->as_jobject() == NULL) {
814 __ movptr(as_Address(addr), NULL_WORD);
815 } else {
816 if (is_literal_address(addr)) {
817 ShouldNotReachHere();
818 __ movoop(as_Address(addr, noreg), c->as_jobject());
819 } else {
820 #ifdef _LP64
821 __ movoop(rscratch1, c->as_jobject());
822 null_check_here = code_offset();
823 __ movptr(as_Address_lo(addr), rscratch1);
824 #else
825 __ movoop(as_Address(addr), c->as_jobject());
826 #endif
827 }
828 }
829 break;
830
831 case T_LONG: // fall through
832 case T_DOUBLE:
833 #ifdef _LP64
834 if (is_literal_address(addr)) {
835 ShouldNotReachHere();
836 __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
837 } else {
838 __ movptr(r10, (intptr_t)c->as_jlong_bits());
839 null_check_here = code_offset();
840 __ movptr(as_Address_lo(addr), r10);
841 }
842 #else
843 // Always reachable in 32bit so this doesn't produce useless move literal
980 __ movdbl(dst_addr, src->as_xmm_double_reg());
981
982 } else if (src->is_single_fpu()) {
983 assert(src->fpu_regnr() == 0, "argument must be on TOS");
984 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
985 if (pop_fpu_stack) __ fstp_s (dst_addr);
986 else __ fst_s (dst_addr);
987
988 } else if (src->is_double_fpu()) {
989 assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
990 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
991 if (pop_fpu_stack) __ fstp_d (dst_addr);
992 else __ fst_d (dst_addr);
993
994 } else {
995 ShouldNotReachHere();
996 }
997 }
998
999
1000 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) {
1001 LIR_Address* to_addr = dest->as_address_ptr();
1002 PatchingStub* patch = NULL;
1003
1004 if (type == T_ARRAY || type == T_OBJECT) {
1005 __ verify_oop(src->as_register());
1006 }
1007 if (patch_code != lir_patch_none) {
1008 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1009 Address toa = as_Address(to_addr);
1010 assert(toa.disp() != 0, "must have");
1011 }
1012 if (info != NULL) {
1013 add_debug_info_for_null_check_here(info);
1014 }
1015
1016 switch (type) {
1017 case T_FLOAT: {
1018 if (src->is_single_xmm()) {
1019 __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
1020 } else {
1021 assert(src->is_single_fpu(), "must be");
1022 assert(src->fpu_regnr() == 0, "argument must be on TOS");
1023 if (pop_fpu_stack) __ fstp_s(as_Address(to_addr));
1024 else __ fst_s (as_Address(to_addr));
1025 }
1026 break;
1027 }
1028
1029 case T_DOUBLE: {
1030 if (src->is_double_xmm()) {
1031 __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1032 } else {
1033 assert(src->is_double_fpu(), "must be");
1034 assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1035 if (pop_fpu_stack) __ fstp_d(as_Address(to_addr));
1036 else __ fst_d (as_Address(to_addr));
1037 }
1038 break;
1039 }
1040
1041 case T_ADDRESS: // fall through
1042 case T_ARRAY: // fall through
1043 case T_OBJECT: // fall through
1044 #ifdef _LP64
1045 __ movptr(as_Address(to_addr), src->as_register());
1046 break;
1047 #endif // _LP64
1048 case T_INT:
1049 __ movl(as_Address(to_addr), src->as_register());
1050 break;
1051
1052 case T_LONG: {
1053 Register from_lo = src->as_register_lo();
1054 Register from_hi = src->as_register_hi();
1055 #ifdef _LP64
1056 __ movptr(as_Address_lo(to_addr), from_lo);
1057 #else
1058 Register base = to_addr->base()->as_register();
1059 Register index = noreg;
1060 if (to_addr->index()->is_register()) {
1061 index = to_addr->index()->as_register();
1062 }
1063 if (base == from_lo || index == from_lo) {
1064 assert(base != from_hi, "can't be");
1065 assert(index == noreg || (index != base && index != from_hi), "can't handle this");
1066 __ movl(as_Address_hi(to_addr), from_hi);
1067 if (patch != NULL) {
1084 break;
1085 }
1086
1087 case T_BYTE: // fall through
1088 case T_BOOLEAN: {
1089 Register src_reg = src->as_register();
1090 Address dst_addr = as_Address(to_addr);
1091 assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
1092 __ movb(dst_addr, src_reg);
1093 break;
1094 }
1095
1096 case T_CHAR: // fall through
1097 case T_SHORT:
1098 __ movw(as_Address(to_addr), src->as_register());
1099 break;
1100
1101 default:
1102 ShouldNotReachHere();
1103 }
1104
1105 if (patch_code != lir_patch_none) {
1106 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1107 }
1108 }
1109
1110
1111 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1112 assert(src->is_stack(), "should not call otherwise");
1113 assert(dest->is_register(), "should not call otherwise");
1114
1115 if (dest->is_single_cpu()) {
1116 if (type == T_ARRAY || type == T_OBJECT) {
1117 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1118 __ verify_oop(dest->as_register());
1119 } else {
1120 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1121 }
1122
1123 } else if (dest->is_double_cpu()) {
1167 }
1168
1169 } else if (src->is_double_stack()) {
1170 #ifdef _LP64
1171 __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1172 __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1173 #else
1174 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1175 // push and pop the part at src + wordSize, adding wordSize for the previous push
1176 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1177 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1178 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1179 #endif // _LP64
1180
1181 } else {
1182 ShouldNotReachHere();
1183 }
1184 }
1185
1186
1187 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) {
1188 assert(src->is_address(), "should not call otherwise");
1189 assert(dest->is_register(), "should not call otherwise");
1190
1191 LIR_Address* addr = src->as_address_ptr();
1192 Address from_addr = as_Address(addr);
1193
1194 switch (type) {
1195 case T_BOOLEAN: // fall through
1196 case T_BYTE: // fall through
1197 case T_CHAR: // fall through
1198 case T_SHORT:
1199 if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1200 // on pre P6 processors we may get partial register stalls
1201 // so blow away the value of to_rinfo before loading a
1202 // partial word into it. Do it here so that it precedes
1203 // the potential patch point below.
1204 __ xorptr(dest->as_register(), dest->as_register());
1205 }
1206 break;
1207 }
1221 __ movflt(dest->as_xmm_float_reg(), from_addr);
1222 } else {
1223 assert(dest->is_single_fpu(), "must be");
1224 assert(dest->fpu_regnr() == 0, "dest must be TOS");
1225 __ fld_s(from_addr);
1226 }
1227 break;
1228 }
1229
1230 case T_DOUBLE: {
1231 if (dest->is_double_xmm()) {
1232 __ movdbl(dest->as_xmm_double_reg(), from_addr);
1233 } else {
1234 assert(dest->is_double_fpu(), "must be");
1235 assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1236 __ fld_d(from_addr);
1237 }
1238 break;
1239 }
1240
1241 case T_ADDRESS: // fall through
1242 case T_OBJECT: // fall through
1243 case T_ARRAY: // fall through
1244 #ifdef _LP64
1245 __ movptr(dest->as_register(), from_addr);
1246 break;
1247 #endif // _LP64
1248 case T_INT:
1249 __ movl(dest->as_register(), from_addr);
1250 break;
1251
1252 case T_LONG: {
1253 Register to_lo = dest->as_register_lo();
1254 Register to_hi = dest->as_register_hi();
1255 #ifdef _LP64
1256 __ movptr(to_lo, as_Address_lo(addr));
1257 #else
1258 Register base = addr->base()->as_register();
1259 Register index = noreg;
1260 if (addr->index()->is_register()) {
1261 index = addr->index()->as_register();
1262 }
1263 if ((base == to_lo && index == to_hi) ||
1264 (base == to_hi && index == to_lo)) {
1265 // addresses with 2 registers are only formed as a result of
1266 // array access so this code will never have to deal with
1267 // patches or null checks.
1322 Register dest_reg = dest->as_register();
1323 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1324 __ movswl(dest_reg, from_addr);
1325 } else {
1326 __ movw(dest_reg, from_addr);
1327 __ shll(dest_reg, 16);
1328 __ sarl(dest_reg, 16);
1329 }
1330 break;
1331 }
1332
1333 default:
1334 ShouldNotReachHere();
1335 }
1336
1337 if (patch != NULL) {
1338 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1339 }
1340
1341 if (type == T_ARRAY || type == T_OBJECT) {
1342 __ verify_oop(dest->as_register());
1343 }
1344 }
1345
1346
1347 void LIR_Assembler::prefetchr(LIR_Opr src) {
1348 LIR_Address* addr = src->as_address_ptr();
1349 Address from_addr = as_Address(addr);
1350
1351 if (VM_Version::supports_sse()) {
1352 switch (ReadPrefetchInstr) {
1353 case 0:
1354 __ prefetchnta(from_addr); break;
1355 case 1:
1356 __ prefetcht0(from_addr); break;
1357 case 2:
1358 __ prefetcht2(from_addr); break;
1359 default:
1360 ShouldNotReachHere(); break;
1361 }
1661 assert(method != NULL, "Should have method");
1662 int bci = op->profiled_bci();
1663 md = method->method_data();
1664 if (md == NULL) {
1665 bailout("out of memory building methodDataOop");
1666 return;
1667 }
1668 data = md->bci_to_data(bci);
1669 assert(data != NULL, "need data for type check");
1670 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1671 }
1672 Label profile_cast_success, profile_cast_failure;
1673 Label *success_target = op->should_profile() ? &profile_cast_success : success;
1674 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1675
1676 if (obj == k_RInfo) {
1677 k_RInfo = dst;
1678 } else if (obj == klass_RInfo) {
1679 klass_RInfo = dst;
1680 }
1681 if (k->is_loaded()) {
1682 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1683 } else {
1684 Rtmp1 = op->tmp3()->as_register();
1685 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1686 }
1687
1688 assert_different_registers(obj, k_RInfo, klass_RInfo);
1689 if (!k->is_loaded()) {
1690 jobject2reg_with_patching(k_RInfo, op->info_for_patch());
1691 } else {
1692 #ifdef _LP64
1693 __ movoop(k_RInfo, k->constant_encoding());
1694 #endif // _LP64
1695 }
1696 assert(obj != k_RInfo, "must be different");
1697
1698 __ cmpptr(obj, (int32_t)NULL_WORD);
1699 if (op->should_profile()) {
1700 Label not_null;
1701 __ jccb(Assembler::notEqual, not_null);
1702 // Object is null; update MDO and exit
1703 Register mdo = klass_RInfo;
1704 __ movoop(mdo, md->constant_encoding());
1705 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1706 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1707 __ orl(data_addr, header_bits);
1708 __ jmp(*obj_is_null);
1709 __ bind(not_null);
1710 } else {
1711 __ jcc(Assembler::equal, *obj_is_null);
1712 }
1713 __ verify_oop(obj);
1714
1715 if (op->fast_check()) {
1716 // get object class
1717 // not a safepoint as obj null check happens earlier
1718 if (k->is_loaded()) {
1719 #ifdef _LP64
1720 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1721 #else
1722 __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
1723 #endif // _LP64
1724 } else {
1725 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1726 }
1727 __ jcc(Assembler::notEqual, *failure_target);
1728 // successful cast, fall through to profile or jump
1729 } else {
1730 // get object class
1731 // not a safepoint as obj null check happens earlier
1732 __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1733 if (k->is_loaded()) {
1734 // See if we get an immediate positive hit
1735 #ifdef _LP64
1736 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1737 #else
1738 __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
1739 #endif // _LP64
1740 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
1741 __ jcc(Assembler::notEqual, *failure_target);
1742 // successful cast, fall through to profile or jump
1743 } else {
1744 // See if we get an immediate positive hit
1745 __ jcc(Assembler::equal, *success_target);
1746 // check for self
1747 #ifdef _LP64
1748 __ cmpptr(klass_RInfo, k_RInfo);
1749 #else
1750 __ cmpoop(klass_RInfo, k->constant_encoding());
1751 #endif // _LP64
1752 __ jcc(Assembler::equal, *success_target);
1767 }
1768 } else {
1769 // perform the fast part of the checking logic
1770 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1771 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1772 __ push(klass_RInfo);
1773 __ push(k_RInfo);
1774 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1775 __ pop(klass_RInfo);
1776 __ pop(k_RInfo);
1777 // result is a boolean
1778 __ cmpl(k_RInfo, 0);
1779 __ jcc(Assembler::equal, *failure_target);
1780 // successful cast, fall through to profile or jump
1781 }
1782 }
1783 if (op->should_profile()) {
1784 Register mdo = klass_RInfo, recv = k_RInfo;
1785 __ bind(profile_cast_success);
1786 __ movoop(mdo, md->constant_encoding());
1787 __ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes()));
1788 Label update_done;
1789 type_profile_helper(mdo, md, data, recv, success);
1790 __ jmp(*success);
1791
1792 __ bind(profile_cast_failure);
1793 __ movoop(mdo, md->constant_encoding());
1794 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1795 __ subptr(counter_addr, DataLayout::counter_increment);
1796 __ jmp(*failure);
1797 }
1798 __ jmp(*success);
1799 }
1800
1801
1802 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1803 LIR_Code code = op->code();
1804 if (code == lir_store_check) {
1805 Register value = op->object()->as_register();
1806 Register array = op->array()->as_register();
1807 Register k_RInfo = op->tmp1()->as_register();
1831 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1832 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
1833
1834 __ cmpptr(value, (int32_t)NULL_WORD);
1835 if (op->should_profile()) {
1836 Label not_null;
1837 __ jccb(Assembler::notEqual, not_null);
1838 // Object is null; update MDO and exit
1839 Register mdo = klass_RInfo;
1840 __ movoop(mdo, md->constant_encoding());
1841 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1842 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1843 __ orl(data_addr, header_bits);
1844 __ jmp(done);
1845 __ bind(not_null);
1846 } else {
1847 __ jcc(Assembler::equal, done);
1848 }
1849
1850 add_debug_info_for_null_check_here(op->info_for_exception());
1851 __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
1852 __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
1853
1854 // get instance klass
1855 __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
1856 // perform the fast part of the checking logic
1857 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1858 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1859 __ push(klass_RInfo);
1860 __ push(k_RInfo);
1861 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1862 __ pop(klass_RInfo);
1863 __ pop(k_RInfo);
1864 // result is a boolean
1865 __ cmpl(k_RInfo, 0);
1866 __ jcc(Assembler::equal, *failure_target);
1867 // fall through to the success case
1868
1869 if (op->should_profile()) {
1870 Register mdo = klass_RInfo, recv = k_RInfo;
1871 __ bind(profile_cast_success);
1872 __ movoop(mdo, md->constant_encoding());
1873 __ movptr(recv, Address(value, oopDesc::klass_offset_in_bytes()));
1874 Label update_done;
1875 type_profile_helper(mdo, md, data, recv, &done);
1876 __ jmpb(done);
1877
1878 __ bind(profile_cast_failure);
1879 __ movoop(mdo, md->constant_encoding());
1880 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1881 __ subptr(counter_addr, DataLayout::counter_increment);
1882 __ jmp(*stub->entry());
1883 }
1884
1885 __ bind(done);
1886 } else
1887 if (code == lir_checkcast) {
1888 Register obj = op->object()->as_register();
1889 Register dst = op->result_opr()->as_register();
1890 Label success;
1891 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1892 __ bind(success);
1893 if (dst != obj) {
1917 assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1918 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1919 assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1920 assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1921 Register addr = op->addr()->as_register();
1922 if (os::is_MP()) {
1923 __ lock();
1924 }
1925 NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1926
1927 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1928 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1929 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1930 Register newval = op->new_value()->as_register();
1931 Register cmpval = op->cmp_value()->as_register();
1932 assert(cmpval == rax, "wrong register");
1933 assert(newval != NULL, "new val must be register");
1934 assert(cmpval != newval, "cmp and new values must be in different registers");
1935 assert(cmpval != addr, "cmp and addr must be in different registers");
1936 assert(newval != addr, "new value and addr must be in different registers");
1937 if (os::is_MP()) {
1938 __ lock();
1939 }
1940 if ( op->code() == lir_cas_obj) {
1941 __ cmpxchgptr(newval, Address(addr, 0));
1942 } else if (op->code() == lir_cas_int) {
1943 __ cmpxchgl(newval, Address(addr, 0));
1944 }
1945 #ifdef _LP64
1946 } else if (op->code() == lir_cas_long) {
1947 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1948 Register newval = op->new_value()->as_register_lo();
1949 Register cmpval = op->cmp_value()->as_register_lo();
1950 assert(cmpval == rax, "wrong register");
1951 assert(newval != NULL, "new val must be register");
1952 assert(cmpval != newval, "cmp and new values must be in different registers");
1953 assert(cmpval != addr, "cmp and addr must be in different registers");
1954 assert(newval != addr, "new value and addr must be in different registers");
1955 if (os::is_MP()) {
1956 __ lock();
1957 }
1958 __ cmpxchgq(newval, Address(addr, 0));
1959 #endif // _LP64
1960 } else {
1961 Unimplemented();
1962 }
3164 __ testl(dst_pos, dst_pos);
3165 __ jcc(Assembler::less, *stub->entry());
3166 }
3167 if (flags & LIR_OpArrayCopy::length_positive_check) {
3168 __ testl(length, length);
3169 __ jcc(Assembler::less, *stub->entry());
3170 }
3171
3172 if (flags & LIR_OpArrayCopy::src_range_check) {
3173 __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
3174 __ cmpl(tmp, src_length_addr);
3175 __ jcc(Assembler::above, *stub->entry());
3176 }
3177 if (flags & LIR_OpArrayCopy::dst_range_check) {
3178 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
3179 __ cmpl(tmp, dst_length_addr);
3180 __ jcc(Assembler::above, *stub->entry());
3181 }
3182
3183 if (flags & LIR_OpArrayCopy::type_check) {
3184 __ movptr(tmp, src_klass_addr);
3185 __ cmpptr(tmp, dst_klass_addr);
3186 __ jcc(Assembler::notEqual, *stub->entry());
3187 }
3188
3189 #ifdef ASSERT
3190 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3191 // Sanity check the known type with the incoming class. For the
3192 // primitive case the types must match exactly with src.klass and
3193 // dst.klass each exactly matching the default type. For the
3194 // object array case, if no type check is needed then either the
3195 // dst type is exactly the expected type and the src type is a
3196 // subtype which we can't check or src is the same array as dst
3197 // but not necessarily exactly of type default_type.
3198 Label known_ok, halt;
3199 __ movoop(tmp, default_type->constant_encoding());
3200 if (basic_type != T_OBJECT) {
3201 __ cmpptr(tmp, dst_klass_addr);
3202 __ jcc(Assembler::notEqual, halt);
3203 __ cmpptr(tmp, src_klass_addr);
3204 __ jcc(Assembler::equal, known_ok);
3205 } else {
3206 __ cmpptr(tmp, dst_klass_addr);
3207 __ jcc(Assembler::equal, known_ok);
3208 __ cmpptr(src, dst);
3209 __ jcc(Assembler::equal, known_ok);
3210 }
3211 __ bind(halt);
3212 __ stop("incorrect type information in arraycopy");
3213 __ bind(known_ok);
3214 }
3215 #endif
3216
3217 if (shift_amount > 0 && basic_type != T_OBJECT) {
3218 __ shlptr(length, shift_amount);
3219 }
3220
3221 #ifdef _LP64
3222 assert_different_registers(c_rarg0, dst, dst_pos, length);
3223     __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
3224 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3225 assert_different_registers(c_rarg1, length);
3315 return;
3316 }
3317 }
3318
3319 // Receiver type not found in profile data; select an empty slot
3320
3321 // Note that this is less efficient than it should be because it
3322 // always does a write to the receiver part of the
3323 // VirtualCallData rather than just the first time
3324 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3325 ciKlass* receiver = vc_data->receiver(i);
3326 if (receiver == NULL) {
3327 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
3328 __ movoop(recv_addr, known_klass->constant_encoding());
3329 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3330 __ addptr(data_addr, DataLayout::counter_increment);
3331 return;
3332 }
3333 }
3334 } else {
3335 __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
3336 Label update_done;
3337 type_profile_helper(mdo, md, data, recv, &update_done);
3338 // Receiver did not match any saved receiver and there is no empty row for it.
3339 // Increment total counter to indicate polymorphic case.
3340 __ addptr(counter_addr, DataLayout::counter_increment);
3341
3342 __ bind(update_done);
3343 }
3344 } else {
3345 // Static call
3346 __ addptr(counter_addr, DataLayout::counter_increment);
3347 }
3348 }
3349
3350 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3351 Unimplemented();
3352 }
3353
3354
3355 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
314 __ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
315 __ jcc(Assembler::notZero, L);
316 __ stop("locked object is NULL");
317 __ bind(L);
318 }
319 #endif
320 __ movptr(rbx, Address(OSR_buf, slot_offset + 0));
321 __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
322 __ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
323 __ movptr(frame_map()->address_for_monitor_object(i), rbx);
324 }
325 }
326 }
327
328
329 // inline cache check; done before the frame is built.
330 int LIR_Assembler::check_icache() {
331 Register receiver = FrameMap::receiver_opr->as_register();
332 Register ic_klass = IC_Klass;
333 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
334 const bool do_post_padding = VerifyOops || UseCompressedOops;
335 if (!do_post_padding) {
336 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
337 while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
338 __ nop();
339 }
340 }
341 int offset = __ offset();
342 __ inline_cache_check(receiver, IC_Klass);
343 assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
344 if (do_post_padding) {
345 // force alignment after the cache check.
346     // It's been verified to be aligned if !do_post_padding
347 __ align(CodeEntryAlignment);
348 }
349 return offset;
350 }
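// --- Illustrative sketch, not part of c1_LIRAssembler_x86.cpp ---------------
// The padding loop in check_icache() emits nops until the current code offset
// plus the size of the inline-cache compare is a multiple of CodeEntryAlignment,
// so the verified entry point that follows the compare starts aligned. The
// standalone helper below reproduces that arithmetic; the names and the sample
// values are assumptions chosen for illustration only.
#include <cstdio>

static int ic_padding_nops(int current_offset, int ic_cmp_size, int alignment) {
  int nops = 0;
  // mirrors: while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) __ nop();
  while ((current_offset + nops + ic_cmp_size) % alignment != 0) {
    nops++;
  }
  return nops;
}

int main() {
  // e.g. a 10-byte compare sequence (LP64) and an assumed 32-byte CodeEntryAlignment
  printf("pad with %d nops\n", ic_padding_nops(4, 10, 32));
  return 0;
}
// ---------------------------------------------------------------------------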
351
352
353 void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
354 jobject o = NULL;
355 PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id);
356 __ movoop(reg, o);
357 patching_epilog(patch, lir_patch_normal, reg, info);
358 }
359
360
361 void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_hdr, int monitor_no, Register exception) {
362 if (exception->is_valid()) {
363 // preserve exception
364 // note: the monitor_exit runtime call is a leaf routine
530 int offset = code_offset();
531 InternalAddress here(__ pc());
532
533 __ pushptr(here.addr());
534 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
535
536 assert(code_offset() - offset <= deopt_handler_size, "overflow");
537 __ end_a_stub();
538
539 return offset;
540 }
541
542
543 // This is the fast version of java.lang.String.compare; it has no
544 // OSR entry and therefore we generate a slow version for OSRs
545 void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
546 __ movptr (rbx, rcx); // receiver is in rcx
547 __ movptr (rax, arg1->as_register());
548
549 // Get addresses of first characters from both Strings
550 __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
551 __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
552 __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
553
554
555     // rbx may be NULL
556 add_debug_info_for_null_check_here(info);
557 __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
558 __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
559 __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
560
561 // compute minimum length (in rax) and difference of lengths (on top of stack)
562 if (VM_Version::supports_cmov()) {
563 __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
564 __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
565 __ mov (rcx, rbx);
566 __ subptr (rbx, rax); // subtract lengths
567 __ push (rbx); // result
568 __ cmov (Assembler::lessEqual, rax, rcx);
569 } else {
570 Label L;
571 __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
572 __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes()));
573 __ mov (rax, rbx);
574 __ subptr (rbx, rcx);
575 __ push (rbx);
576 __ jcc (Assembler::lessEqual, L);
577 __ mov (rax, rcx);
667 __ test32(rax, polling_page);
668 return offset;
669 }
670
671
672 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
673 if (from_reg != to_reg) __ mov(to_reg, from_reg);
674 }
675
676 void LIR_Assembler::swap_reg(Register a, Register b) {
677 __ xchgptr(a, b);
678 }
679
680
681 void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
682 assert(src->is_constant(), "should not call otherwise");
683 assert(dest->is_register(), "should not call otherwise");
684 LIR_Const* c = src->as_constant_ptr();
685
686 switch (c->type()) {
687 case T_INT: {
688 assert(patch_code == lir_patch_none, "no patching handled here");
689 __ movl(dest->as_register(), c->as_jint());
690 break;
691 }
692
693 case T_ADDRESS: {
694 assert(patch_code == lir_patch_none, "no patching handled here");
695 __ movptr(dest->as_register(), c->as_jint());
696 break;
697 }
698
699 case T_LONG: {
700 assert(patch_code == lir_patch_none, "no patching handled here");
701 #ifdef _LP64
702 __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
703 #else
704 __ movptr(dest->as_register_lo(), c->as_jint_lo());
705 __ movptr(dest->as_register_hi(), c->as_jint_hi());
706 #endif // _LP64
707 break;
708 }
709
710 case T_OBJECT: {
711 if (patch_code != lir_patch_none) {
712 jobject2reg_with_patching(dest->as_register(), info);
713 } else {
714 __ movoop(dest->as_register(), c->as_jobject());
715 }
716 break;
717 }
718
756 } else {
757 __ fld_d (InternalAddress(double_constant(c->as_jdouble())));
758 }
759 }
760 break;
761 }
762
763 default:
764 ShouldNotReachHere();
765 }
766 }
767
768 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
769 assert(src->is_constant(), "should not call otherwise");
770 assert(dest->is_stack(), "should not call otherwise");
771 LIR_Const* c = src->as_constant_ptr();
772
773 switch (c->type()) {
774 case T_INT: // fall through
775 case T_FLOAT:
776 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
777 break;
778
779 case T_ADDRESS:
780 __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
781 break;
782
783 case T_OBJECT:
784 __ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
785 break;
786
787 case T_LONG: // fall through
788 case T_DOUBLE:
789 #ifdef _LP64
790 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
791 lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
792 #else
793 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
794 lo_word_offset_in_bytes), c->as_jint_lo_bits());
795 __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
796 hi_word_offset_in_bytes), c->as_jint_hi_bits());
797 #endif // _LP64
798 break;
799
800 default:
801 ShouldNotReachHere();
802 }
803 }
804
805 void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
806 assert(src->is_constant(), "should not call otherwise");
807 assert(dest->is_address(), "should not call otherwise");
808 LIR_Const* c = src->as_constant_ptr();
809 LIR_Address* addr = dest->as_address_ptr();
810
811 int null_check_here = code_offset();
812 switch (type) {
813 case T_INT: // fall through
814 case T_FLOAT:
815 __ movl(as_Address(addr), c->as_jint_bits());
816 break;
817
818 case T_ADDRESS:
819 __ movptr(as_Address(addr), c->as_jint_bits());
820 break;
821
822 case T_OBJECT: // fall through
823 case T_ARRAY:
824 if (c->as_jobject() == NULL) {
825 #ifdef _LP64
826 if (UseCompressedOops && !wide) {
827 __ movl(as_Address(addr), (int32_t)NULL_WORD);
828 } else {
829 __ movptr(as_Address(addr), NULL_WORD);
830 }
831 #else
832 __ movptr(as_Address(addr), NULL_WORD);
833 #endif
834 } else {
835 if (is_literal_address(addr)) {
836 ShouldNotReachHere();
837 __ movoop(as_Address(addr, noreg), c->as_jobject());
838 } else {
839 #ifdef _LP64
840 __ movoop(rscratch1, c->as_jobject());
841 if (UseCompressedOops && !wide) {
842 __ encode_heap_oop(rscratch1);
843 null_check_here = code_offset();
844 __ movl(as_Address_lo(addr), rscratch1);
845 } else {
846 null_check_here = code_offset();
847 __ movptr(as_Address_lo(addr), rscratch1);
848 }
849 #else
850 __ movoop(as_Address(addr), c->as_jobject());
851 #endif
852 }
853 }
854 break;
855
856 case T_LONG: // fall through
857 case T_DOUBLE:
858 #ifdef _LP64
859 if (is_literal_address(addr)) {
860 ShouldNotReachHere();
861 __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
862 } else {
863 __ movptr(r10, (intptr_t)c->as_jlong_bits());
864 null_check_here = code_offset();
865 __ movptr(as_Address_lo(addr), r10);
866 }
867 #else
868 // Always reachable in 32bit so this doesn't produce useless move literal
1005 __ movdbl(dst_addr, src->as_xmm_double_reg());
1006
1007 } else if (src->is_single_fpu()) {
1008 assert(src->fpu_regnr() == 0, "argument must be on TOS");
1009 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
1010 if (pop_fpu_stack) __ fstp_s (dst_addr);
1011 else __ fst_s (dst_addr);
1012
1013 } else if (src->is_double_fpu()) {
1014 assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1015 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
1016 if (pop_fpu_stack) __ fstp_d (dst_addr);
1017 else __ fst_d (dst_addr);
1018
1019 } else {
1020 ShouldNotReachHere();
1021 }
1022 }
1023
1024
1025 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */, bool wide) {
1026 LIR_Address* to_addr = dest->as_address_ptr();
1027 PatchingStub* patch = NULL;
1028
1029 #ifdef _LP64
1030 Register compressed_src = rscratch1;
1031 #endif
1032
1033 if (type == T_ARRAY || type == T_OBJECT) {
1034 __ verify_oop(src->as_register());
1035 #ifdef _LP64
1036 if (UseCompressedOops && !wide) {
1037 __ movptr(compressed_src, src->as_register());
1038 __ encode_heap_oop(compressed_src);
1039 }
1040 #endif
1041 }
1042
1043 if (patch_code != lir_patch_none) {
1044 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1045 Address toa = as_Address(to_addr);
1046 assert(toa.disp() != 0, "must have");
1047 }
1048
1049 int null_check_here = code_offset();
1050 switch (type) {
1051 case T_FLOAT: {
1052 if (src->is_single_xmm()) {
1053 __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
1054 } else {
1055 assert(src->is_single_fpu(), "must be");
1056 assert(src->fpu_regnr() == 0, "argument must be on TOS");
1057 if (pop_fpu_stack) __ fstp_s(as_Address(to_addr));
1058 else __ fst_s (as_Address(to_addr));
1059 }
1060 break;
1061 }
1062
1063 case T_DOUBLE: {
1064 if (src->is_double_xmm()) {
1065 __ movdbl(as_Address(to_addr), src->as_xmm_double_reg());
1066 } else {
1067 assert(src->is_double_fpu(), "must be");
1068 assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
1069 if (pop_fpu_stack) __ fstp_d(as_Address(to_addr));
1070 else __ fst_d (as_Address(to_addr));
1071 }
1072 break;
1073 }
1074
1075 case T_ARRAY: // fall through
1076 case T_OBJECT: // fall through
1077 #ifdef _LP64
1078 if (UseCompressedOops && !wide) {
1079 __ movl(as_Address(to_addr), compressed_src);
1080 } else {
1081 __ movptr(as_Address(to_addr), src->as_register());
1082 }
1083 break;
1084 #endif // _LP64
1085 case T_ADDRESS:
1086 __ movptr(as_Address(to_addr), src->as_register());
1087 break;
1088 case T_INT:
1089 __ movl(as_Address(to_addr), src->as_register());
1090 break;
1091
1092 case T_LONG: {
1093 Register from_lo = src->as_register_lo();
1094 Register from_hi = src->as_register_hi();
1095 #ifdef _LP64
1096 __ movptr(as_Address_lo(to_addr), from_lo);
1097 #else
1098 Register base = to_addr->base()->as_register();
1099 Register index = noreg;
1100 if (to_addr->index()->is_register()) {
1101 index = to_addr->index()->as_register();
1102 }
1103 if (base == from_lo || index == from_lo) {
1104 assert(base != from_hi, "can't be");
1105 assert(index == noreg || (index != base && index != from_hi), "can't handle this");
1106 __ movl(as_Address_hi(to_addr), from_hi);
1107 if (patch != NULL) {
1124 break;
1125 }
1126
1127 case T_BYTE: // fall through
1128 case T_BOOLEAN: {
1129 Register src_reg = src->as_register();
1130 Address dst_addr = as_Address(to_addr);
1131 assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
1132 __ movb(dst_addr, src_reg);
1133 break;
1134 }
1135
1136 case T_CHAR: // fall through
1137 case T_SHORT:
1138 __ movw(as_Address(to_addr), src->as_register());
1139 break;
1140
1141 default:
1142 ShouldNotReachHere();
1143 }
1144 if (info != NULL) {
1145 add_debug_info_for_null_check(null_check_here, info);
1146 }
1147
1148 if (patch_code != lir_patch_none) {
1149 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1150 }
1151 }
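// --- Illustrative sketch, not part of c1_LIRAssembler_x86.cpp ---------------
// reg2mem() records code_offset() just before the store that may fault and,
// when a CodeEmitInfo is present, registers that offset as an implicit null
// check. If the store later takes a SIGSEGV, the VM can map the faulting pc
// back to the recorded offset and raise a NullPointerException instead of
// crashing. The toy table below models that offset -> debug-info association;
// all names are assumptions for illustration only.
#include <cstdio>
#include <map>

struct DebugInfo { int bci; };                 // stand-in for CodeEmitInfo

static std::map<int, DebugInfo> implicit_null_checks;

static void record_null_check(int pc_offset, DebugInfo info) {
  implicit_null_checks[pc_offset] = info;      // one entry per faulting instruction
}

static bool is_implicit_null_check(int faulting_offset, DebugInfo* out) {
  std::map<int, DebugInfo>::iterator it = implicit_null_checks.find(faulting_offset);
  if (it == implicit_null_checks.end()) return false;
  *out = it->second;
  return true;
}

int main() {
  record_null_check(0x48, DebugInfo{17});
  DebugInfo info;
  if (is_implicit_null_check(0x48, &info)) {
    printf("fault at +0x48 maps to bci %d\n", info.bci);
  }
  return 0;
}
// ---------------------------------------------------------------------------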
1152
1153
1154 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1155 assert(src->is_stack(), "should not call otherwise");
1156 assert(dest->is_register(), "should not call otherwise");
1157
1158 if (dest->is_single_cpu()) {
1159 if (type == T_ARRAY || type == T_OBJECT) {
1160 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1161 __ verify_oop(dest->as_register());
1162 } else {
1163 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1164 }
1165
1166 } else if (dest->is_double_cpu()) {
1210 }
1211
1212 } else if (src->is_double_stack()) {
1213 #ifdef _LP64
1214 __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1215 __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1216 #else
1217 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1218 // push and pop the part at src + wordSize, adding wordSize for the previous push
1219 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
1220 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
1221 __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
1222 #endif // _LP64
1223
1224 } else {
1225 ShouldNotReachHere();
1226 }
1227 }
1228
1229
1230 void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */, bool wide) {
1231 assert(src->is_address(), "should not call otherwise");
1232 assert(dest->is_register(), "should not call otherwise");
1233
1234 LIR_Address* addr = src->as_address_ptr();
1235 Address from_addr = as_Address(addr);
1236
1237 switch (type) {
1238 case T_BOOLEAN: // fall through
1239 case T_BYTE: // fall through
1240 case T_CHAR: // fall through
1241 case T_SHORT:
1242 if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
1243 // on pre P6 processors we may get partial register stalls
1244 // so blow away the value of to_rinfo before loading a
1245 // partial word into it. Do it here so that it precedes
1246 // the potential patch point below.
1247 __ xorptr(dest->as_register(), dest->as_register());
1248 }
1249 break;
1250 }
1264 __ movflt(dest->as_xmm_float_reg(), from_addr);
1265 } else {
1266 assert(dest->is_single_fpu(), "must be");
1267 assert(dest->fpu_regnr() == 0, "dest must be TOS");
1268 __ fld_s(from_addr);
1269 }
1270 break;
1271 }
1272
1273 case T_DOUBLE: {
1274 if (dest->is_double_xmm()) {
1275 __ movdbl(dest->as_xmm_double_reg(), from_addr);
1276 } else {
1277 assert(dest->is_double_fpu(), "must be");
1278 assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1279 __ fld_d(from_addr);
1280 }
1281 break;
1282 }
1283
1284 case T_OBJECT: // fall through
1285 case T_ARRAY: // fall through
1286 #ifdef _LP64
1287 if (UseCompressedOops && !wide) {
1288 __ movl(dest->as_register(), from_addr);
1289 } else {
1290 __ movptr(dest->as_register(), from_addr);
1291 }
1292 break;
1293 #endif // _LP64
1294 case T_ADDRESS:
1295 __ movptr(dest->as_register(), from_addr);
1296 break;
1297 case T_INT:
1298 __ movl(dest->as_register(), from_addr);
1299 break;
1300
1301 case T_LONG: {
1302 Register to_lo = dest->as_register_lo();
1303 Register to_hi = dest->as_register_hi();
1304 #ifdef _LP64
1305 __ movptr(to_lo, as_Address_lo(addr));
1306 #else
1307 Register base = addr->base()->as_register();
1308 Register index = noreg;
1309 if (addr->index()->is_register()) {
1310 index = addr->index()->as_register();
1311 }
1312 if ((base == to_lo && index == to_hi) ||
1313 (base == to_hi && index == to_lo)) {
1314 // addresses with 2 registers are only formed as a result of
1315 // array access so this code will never have to deal with
1316 // patches or null checks.
1371 Register dest_reg = dest->as_register();
1372 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1373 __ movswl(dest_reg, from_addr);
1374 } else {
1375 __ movw(dest_reg, from_addr);
1376 __ shll(dest_reg, 16);
1377 __ sarl(dest_reg, 16);
1378 }
1379 break;
1380 }
1381
1382 default:
1383 ShouldNotReachHere();
1384 }
1385
1386 if (patch != NULL) {
1387 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1388 }
1389
1390 if (type == T_ARRAY || type == T_OBJECT) {
1391 #ifdef _LP64
1392 if (UseCompressedOops && !wide) {
1393 __ decode_heap_oop(dest->as_register());
1394 }
1395 #endif
1396 __ verify_oop(dest->as_register());
1397 }
1398 }
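// --- Illustrative sketch, not part of c1_LIRAssembler_x86.cpp ---------------
// With UseCompressedOops a heap reference is stored as a 32-bit "narrow oop":
// encoding subtracts the heap base and shifts right by the object-alignment
// shift, decoding does the reverse. That is why mem2reg() above loads the field
// with a 32-bit movl and then calls decode_heap_oop(), while the uncompressed
// path uses a full movptr. The arithmetic below is a simplified model (the
// zero-based and unscaled modes are omitted); the constants are assumptions.
#include <cstdint>
#include <cstdio>

static const uint64_t kHeapBase = 0x0000000700000000ULL; // assumed heap base
static const int      kShift    = 3;                     // 8-byte object alignment

static uint32_t encode_heap_oop(uint64_t oop) {
  if (oop == 0) return 0;                                 // NULL stays NULL
  return (uint32_t)((oop - kHeapBase) >> kShift);
}

static uint64_t decode_heap_oop(uint32_t narrow) {
  if (narrow == 0) return 0;
  return kHeapBase + ((uint64_t)narrow << kShift);
}

int main() {
  uint64_t oop = kHeapBase + 0x1234560;
  printf("narrow=0x%x roundtrip_ok=%d\n",
         encode_heap_oop(oop), decode_heap_oop(encode_heap_oop(oop)) == oop);
  return 0;
}
// ---------------------------------------------------------------------------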
1399
1400
1401 void LIR_Assembler::prefetchr(LIR_Opr src) {
1402 LIR_Address* addr = src->as_address_ptr();
1403 Address from_addr = as_Address(addr);
1404
1405 if (VM_Version::supports_sse()) {
1406 switch (ReadPrefetchInstr) {
1407 case 0:
1408 __ prefetchnta(from_addr); break;
1409 case 1:
1410 __ prefetcht0(from_addr); break;
1411 case 2:
1412 __ prefetcht2(from_addr); break;
1413 default:
1414 ShouldNotReachHere(); break;
1415 }
1715 assert(method != NULL, "Should have method");
1716 int bci = op->profiled_bci();
1717 md = method->method_data();
1718 if (md == NULL) {
1719 bailout("out of memory building methodDataOop");
1720 return;
1721 }
1722 data = md->bci_to_data(bci);
1723 assert(data != NULL, "need data for type check");
1724 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1725 }
1726 Label profile_cast_success, profile_cast_failure;
1727 Label *success_target = op->should_profile() ? &profile_cast_success : success;
1728 Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
1729
1730 if (obj == k_RInfo) {
1731 k_RInfo = dst;
1732 } else if (obj == klass_RInfo) {
1733 klass_RInfo = dst;
1734 }
1735 if (k->is_loaded() && LP64_ONLY(!UseCompressedOops) NOT_LP64(true)) {
1736 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1737 } else {
1738 Rtmp1 = op->tmp3()->as_register();
1739 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1740 }
1741
1742 assert_different_registers(obj, k_RInfo, klass_RInfo);
1743 if (!k->is_loaded()) {
1744 jobject2reg_with_patching(k_RInfo, op->info_for_patch());
1745 } else {
1746 #ifdef _LP64
1747 __ movoop(k_RInfo, k->constant_encoding());
1748 #endif // _LP64
1749 }
1750 assert(obj != k_RInfo, "must be different");
1751
1752 __ cmpptr(obj, (int32_t)NULL_WORD);
1753 if (op->should_profile()) {
1754 Label not_null;
1755 __ jccb(Assembler::notEqual, not_null);
1756 // Object is null; update MDO and exit
1757 Register mdo = klass_RInfo;
1758 __ movoop(mdo, md->constant_encoding());
1759 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1760 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1761 __ orl(data_addr, header_bits);
1762 __ jmp(*obj_is_null);
1763 __ bind(not_null);
1764 } else {
1765 __ jcc(Assembler::equal, *obj_is_null);
1766 }
1767 __ verify_oop(obj);
1768
1769 if (op->fast_check()) {
1770 // get object class
1771 // not a safepoint as obj null check happens earlier
1772 #ifdef _LP64
1773 if (UseCompressedOops) {
1774 __ load_klass(Rtmp1, obj);
1775 __ cmpl(k_RInfo, Rtmp1);
1776 } else {
1777 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1778 }
1779 #else
1780 if (k->is_loaded()) {
1781 __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
1782 } else {
1783 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
1784 }
1785 #endif
1786 __ jcc(Assembler::notEqual, *failure_target);
1787 // successful cast, fall through to profile or jump
1788 } else {
1789 // get object class
1790 // not a safepoint as obj null check happens earlier
1791 __ load_klass(klass_RInfo, obj);
1792 if (k->is_loaded()) {
1793 // See if we get an immediate positive hit
1794 #ifdef _LP64
1795 __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
1796 #else
1797 __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
1798 #endif // _LP64
1799 if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
1800 __ jcc(Assembler::notEqual, *failure_target);
1801 // successful cast, fall through to profile or jump
1802 } else {
1803 // See if we get an immediate positive hit
1804 __ jcc(Assembler::equal, *success_target);
1805 // check for self
1806 #ifdef _LP64
1807 __ cmpptr(klass_RInfo, k_RInfo);
1808 #else
1809 __ cmpoop(klass_RInfo, k->constant_encoding());
1810 #endif // _LP64
1811 __ jcc(Assembler::equal, *success_target);
1826 }
1827 } else {
1828 // perform the fast part of the checking logic
1829 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1830 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1831 __ push(klass_RInfo);
1832 __ push(k_RInfo);
1833 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1834 __ pop(klass_RInfo);
1835 __ pop(k_RInfo);
1836 // result is a boolean
1837 __ cmpl(k_RInfo, 0);
1838 __ jcc(Assembler::equal, *failure_target);
1839 // successful cast, fall through to profile or jump
1840 }
1841 }
1842 if (op->should_profile()) {
1843 Register mdo = klass_RInfo, recv = k_RInfo;
1844 __ bind(profile_cast_success);
1845 __ movoop(mdo, md->constant_encoding());
1846 __ load_klass(recv, obj);
1847 Label update_done;
1848 type_profile_helper(mdo, md, data, recv, success);
1849 __ jmp(*success);
1850
1851 __ bind(profile_cast_failure);
1852 __ movoop(mdo, md->constant_encoding());
1853 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1854 __ subptr(counter_addr, DataLayout::counter_increment);
1855 __ jmp(*failure);
1856 }
1857 __ jmp(*success);
1858 }
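// --- Illustrative sketch, not part of c1_LIRAssembler_x86.cpp ---------------
// emit_typecheck_helper() relies on HotSpot's two-level subtype check: a single
// compare against the word at the superclass's super_check_offset inside the
// receiver's klass decides most cases, and only when that compare cannot decide
// does the code scan the secondary supers (the slow_subtype_check_id call
// above). The structs below are a much-simplified model of that idea, not the
// real Klass layout; every name here is an assumption for illustration.
#include <vector>

struct Klass {
  std::vector<Klass*> primary_supers;    // fixed-depth display, self last
  std::vector<Klass*> secondary_supers;  // interfaces / very deep supers
};

static bool is_subtype_of(const Klass* sub, const Klass* super) {
  // fast path: is 'super' in the fixed-depth display?
  for (Klass* k : sub->primary_supers) {
    if (k == super) return true;
  }
  // slow path: linear scan of the secondary supers
  for (Klass* k : sub->secondary_supers) {
    if (k == super) return true;
  }
  return false;
}

int main() {
  Klass object_k, string_k;
  string_k.primary_supers = { &object_k, &string_k };
  return is_subtype_of(&string_k, &object_k) ? 0 : 1;
}
// ---------------------------------------------------------------------------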
1859
1860
1861 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
1862 LIR_Code code = op->code();
1863 if (code == lir_store_check) {
1864 Register value = op->object()->as_register();
1865 Register array = op->array()->as_register();
1866 Register k_RInfo = op->tmp1()->as_register();
1890 Label *success_target = op->should_profile() ? &profile_cast_success : &done;
1891 Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
1892
1893 __ cmpptr(value, (int32_t)NULL_WORD);
1894 if (op->should_profile()) {
1895 Label not_null;
1896 __ jccb(Assembler::notEqual, not_null);
1897 // Object is null; update MDO and exit
1898 Register mdo = klass_RInfo;
1899 __ movoop(mdo, md->constant_encoding());
1900 Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
1901 int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
1902 __ orl(data_addr, header_bits);
1903 __ jmp(done);
1904 __ bind(not_null);
1905 } else {
1906 __ jcc(Assembler::equal, done);
1907 }
1908
1909 add_debug_info_for_null_check_here(op->info_for_exception());
1910 __ load_klass(k_RInfo, array);
1911 __ load_klass(klass_RInfo, value);
1912
1913 // get instance klass (it's already uncompressed)
1914 __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
1915 // perform the fast part of the checking logic
1916 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
1917 // call out-of-line instance of __ check_klass_subtype_slow_path(...):
1918 __ push(klass_RInfo);
1919 __ push(k_RInfo);
1920 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
1921 __ pop(klass_RInfo);
1922 __ pop(k_RInfo);
1923 // result is a boolean
1924 __ cmpl(k_RInfo, 0);
1925 __ jcc(Assembler::equal, *failure_target);
1926 // fall through to the success case
1927
1928 if (op->should_profile()) {
1929 Register mdo = klass_RInfo, recv = k_RInfo;
1930 __ bind(profile_cast_success);
1931 __ movoop(mdo, md->constant_encoding());
1932 __ load_klass(recv, value);
1933 Label update_done;
1934 type_profile_helper(mdo, md, data, recv, &done);
1935 __ jmpb(done);
1936
1937 __ bind(profile_cast_failure);
1938 __ movoop(mdo, md->constant_encoding());
1939 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1940 __ subptr(counter_addr, DataLayout::counter_increment);
1941 __ jmp(*stub->entry());
1942 }
1943
1944 __ bind(done);
1945 } else
1946 if (code == lir_checkcast) {
1947 Register obj = op->object()->as_register();
1948 Register dst = op->result_opr()->as_register();
1949 Label success;
1950 emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
1951 __ bind(success);
1952 if (dst != obj) {
1976 assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1977 assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1978 assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1979 assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1980 Register addr = op->addr()->as_register();
1981 if (os::is_MP()) {
1982 __ lock();
1983 }
1984 NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1985
1986 } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1987 NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1988 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1989 Register newval = op->new_value()->as_register();
1990 Register cmpval = op->cmp_value()->as_register();
1991 assert(cmpval == rax, "wrong register");
1992 assert(newval != NULL, "new val must be register");
1993 assert(cmpval != newval, "cmp and new values must be in different registers");
1994 assert(cmpval != addr, "cmp and addr must be in different registers");
1995 assert(newval != addr, "new value and addr must be in different registers");
1996
1997 if ( op->code() == lir_cas_obj) {
1998 #ifdef _LP64
1999 if (UseCompressedOops) {
2000 __ mov(rscratch1, cmpval);
2001 __ encode_heap_oop(cmpval);
2002 __ mov(rscratch2, newval);
2003 __ encode_heap_oop(rscratch2);
2004 if (os::is_MP()) {
2005 __ lock();
2006 }
2007 __ cmpxchgl(rscratch2, Address(addr, 0));
2008 __ mov(cmpval, rscratch1);
2009 } else
2010 #endif
2011 {
2012 if (os::is_MP()) {
2013 __ lock();
2014 }
2015 __ cmpxchgptr(newval, Address(addr, 0));
2016 }
2017 } else {
2018 assert(op->code() == lir_cas_int, "lir_cas_int expected");
2019 if (os::is_MP()) {
2020 __ lock();
2021 }
2022 __ cmpxchgl(newval, Address(addr, 0));
2023 }
2024 #ifdef _LP64
2025 } else if (op->code() == lir_cas_long) {
2026 Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2027 Register newval = op->new_value()->as_register_lo();
2028 Register cmpval = op->cmp_value()->as_register_lo();
2029 assert(cmpval == rax, "wrong register");
2030 assert(newval != NULL, "new val must be register");
2031 assert(cmpval != newval, "cmp and new values must be in different registers");
2032 assert(cmpval != addr, "cmp and addr must be in different registers");
2033 assert(newval != addr, "new value and addr must be in different registers");
2034 if (os::is_MP()) {
2035 __ lock();
2036 }
2037 __ cmpxchgq(newval, Address(addr, 0));
2038 #endif // _LP64
2039 } else {
2040 Unimplemented();
2041 }
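// --- Illustrative sketch, not part of c1_LIRAssembler_x86.cpp ---------------
// The lock cmpxchg sequences above implement compare-and-swap: the value at the
// address is replaced by newval only if it still equals cmpval (which must live
// in rax). With UseCompressedOops the oop field is 32 bits wide, so cmpval and
// newval are first encoded to narrow oops and a 32-bit cmpxchgl is used.
// std::atomic gives the same semantics in portable C++; this models the
// behaviour, not the emitted machine code.
#include <atomic>
#include <cstdint>
#include <cstdio>

static bool cas32(std::atomic<uint32_t>* field, uint32_t cmpval, uint32_t newval) {
  // store newval iff *field == cmpval, report whether the swap happened
  return field->compare_exchange_strong(cmpval, newval);
}

int main() {
  std::atomic<uint32_t> field(7);
  bool first  = cas32(&field, 7, 42);   // succeeds, field becomes 42
  bool second = cas32(&field, 7, 99);   // fails: the compare value is stale
  printf("first=%d second=%d value=%u\n", first, second, field.load());
  return 0;
}
// ---------------------------------------------------------------------------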
3243 __ testl(dst_pos, dst_pos);
3244 __ jcc(Assembler::less, *stub->entry());
3245 }
3246 if (flags & LIR_OpArrayCopy::length_positive_check) {
3247 __ testl(length, length);
3248 __ jcc(Assembler::less, *stub->entry());
3249 }
3250
3251 if (flags & LIR_OpArrayCopy::src_range_check) {
3252 __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
3253 __ cmpl(tmp, src_length_addr);
3254 __ jcc(Assembler::above, *stub->entry());
3255 }
3256 if (flags & LIR_OpArrayCopy::dst_range_check) {
3257 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
3258 __ cmpl(tmp, dst_length_addr);
3259 __ jcc(Assembler::above, *stub->entry());
3260 }
3261
3262 if (flags & LIR_OpArrayCopy::type_check) {
3263 #ifdef _LP64
3264 if (UseCompressedOops) {
3265 __ movl(tmp, src_klass_addr);
3266 __ cmpl(tmp, dst_klass_addr);
3267 } else
3268 #endif
3269 {
3270 __ movptr(tmp, src_klass_addr);
3271 __ cmpptr(tmp, dst_klass_addr);
3272 }
3273 __ jcc(Assembler::notEqual, *stub->entry());
3274 }
3275
3276 #ifdef ASSERT
3277 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3278 // Sanity check the known type with the incoming class. For the
3279 // primitive case the types must match exactly with src.klass and
3280 // dst.klass each exactly matching the default type. For the
3281 // object array case, if no type check is needed then either the
3282 // dst type is exactly the expected type and the src type is a
3283 // subtype which we can't check or src is the same array as dst
3284 // but not necessarily exactly of type default_type.
3285 Label known_ok, halt;
3286 __ movoop(tmp, default_type->constant_encoding());
3287 #ifdef _LP64
3288 if (UseCompressedOops) {
3289 __ encode_heap_oop(tmp);
3290 }
3291 #endif
3292
3293 if (basic_type != T_OBJECT) {
3294 #ifdef _LP64
3295 if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
3296 else
3297 #endif
3298 __ cmpptr(tmp, dst_klass_addr);
3299 __ jcc(Assembler::notEqual, halt);
3300 #ifdef _LP64
3301 if (UseCompressedOops) __ cmpl(tmp, src_klass_addr);
3302 else
3303 #endif
3304 __ cmpptr(tmp, src_klass_addr);
3305 __ jcc(Assembler::equal, known_ok);
3306 } else {
3307 #ifdef _LP64
3308 if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
3309 else
3310 #endif
3311 __ cmpptr(tmp, dst_klass_addr);
3312 __ jcc(Assembler::equal, known_ok);
3313 __ cmpptr(src, dst);
3314 __ jcc(Assembler::equal, known_ok);
3315 }
3316 __ bind(halt);
3317 __ stop("incorrect type information in arraycopy");
3318 __ bind(known_ok);
3319 }
3320 #endif
3321
3322 if (shift_amount > 0 && basic_type != T_OBJECT) {
3323 __ shlptr(length, shift_amount);
3324 }
3325
3326 #ifdef _LP64
3327 assert_different_registers(c_rarg0, dst, dst_pos, length);
3328     __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
3329 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3330 assert_different_registers(c_rarg1, length);
3420 return;
3421 }
3422 }
3423
3424 // Receiver type not found in profile data; select an empty slot
3425
3426 // Note that this is less efficient than it should be because it
3427 // always does a write to the receiver part of the
3428 // VirtualCallData rather than just the first time
3429 for (i = 0; i < VirtualCallData::row_limit(); i++) {
3430 ciKlass* receiver = vc_data->receiver(i);
3431 if (receiver == NULL) {
3432 Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
3433 __ movoop(recv_addr, known_klass->constant_encoding());
3434 Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
3435 __ addptr(data_addr, DataLayout::counter_increment);
3436 return;
3437 }
3438 }
3439 } else {
3440 __ load_klass(recv, recv);
3441 Label update_done;
3442 type_profile_helper(mdo, md, data, recv, &update_done);
3443 // Receiver did not match any saved receiver and there is no empty row for it.
3444 // Increment total counter to indicate polymorphic case.
3445 __ addptr(counter_addr, DataLayout::counter_increment);
3446
3447 __ bind(update_done);
3448 }
3449 } else {
3450 // Static call
3451 __ addptr(counter_addr, DataLayout::counter_increment);
3452 }
3453 }
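// --- Illustrative sketch, not part of c1_LIRAssembler_x86.cpp ---------------
// The profiling code above keeps a small fixed number of (receiver klass, count)
// rows per call site: bump the count of a matching row, otherwise claim the
// first empty row, and if neither exists bump only the total counter so the
// site is known to be polymorphic. The plain C++ below models that policy;
// row_limit and the names are assumptions for illustration, not the real
// VirtualCallData layout.
#include <cstdint>

struct ReceiverRow { const void* klass; uint64_t count; };

static const int row_limit = 2;          // stand-in for VirtualCallData::row_limit()

struct CallSiteProfile {
  ReceiverRow rows[row_limit] = {};
  uint64_t    total_count     = 0;

  void record(const void* receiver_klass) {
    for (int i = 0; i < row_limit; i++) {              // matching row?
      if (rows[i].klass == receiver_klass) { rows[i].count++; return; }
    }
    for (int i = 0; i < row_limit; i++) {              // empty row?
      if (rows[i].klass == nullptr) { rows[i].klass = receiver_klass; rows[i].count = 1; return; }
    }
    total_count++;                                     // polymorphic: only the total counter moves
  }
};
// ---------------------------------------------------------------------------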
3454
3455 void LIR_Assembler::emit_delay(LIR_OpDelay*) {
3456 Unimplemented();
3457 }
3458
3459
3460 void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {