1 /*
2 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
844 assert(src->is_register(), "should not call otherwise");
845 assert(dest->is_register(), "should not call otherwise");
846
847 // move between cpu-registers
848 if (dest->is_single_cpu()) {
849 #ifdef _LP64
850 if (src->type() == T_LONG) {
851 // Can do LONG -> OBJECT
852 move_regs(src->as_register_lo(), dest->as_register());
853 return;
854 }
855 #endif
856 assert(src->is_single_cpu(), "must match");
857 if (src->type() == T_OBJECT) {
858 __ verify_oop(src->as_register());
859 }
860 move_regs(src->as_register(), dest->as_register());
861
862 } else if (dest->is_double_cpu()) {
863 #ifdef _LP64
864 if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
865 // Surprising to me but we can see move of a long to t_object
866 __ verify_oop(src->as_register());
867 move_regs(src->as_register(), dest->as_register_lo());
868 return;
869 }
870 #endif
871 assert(src->is_double_cpu(), "must match");
872 Register f_lo = src->as_register_lo();
873 Register f_hi = src->as_register_hi();
874 Register t_lo = dest->as_register_lo();
875 Register t_hi = dest->as_register_hi();
876 #ifdef _LP64
877 assert(f_hi == f_lo, "must be same");
878 assert(t_hi == t_lo, "must be same");
879 move_regs(f_lo, t_lo);
880 #else
881 assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
882
883
884 if (f_lo == t_hi && f_hi == t_lo) {
915 __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
916 } else if (dest->is_double_xmm()) {
917 assert(src->is_double_xmm(), "must match");
918 __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
919
920 // move between fpu-registers (no instruction necessary because of fpu-stack)
921 } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
922 assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
923 assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
924 } else {
925 ShouldNotReachHere();
926 }
927 }
928
// Spill a register operand `src` into the stack slot described by `dest`.
// Dispatches on the kind of the source register: single/double GP register,
// XMM register, or x87 FPU stack slot. `pop_fpu_stack` selects fstp vs fst
// for the x87 cases.
// NOTE(review): this chunk elides original lines 956-960 (the double_xmm
// store and the single_fpu branch header), so the double_xmm case below
// appears to fall into fst_s — consult the full file before relying on it.
929 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
930 assert(src->is_register(), "should not call otherwise");
931 assert(dest->is_stack(), "should not call otherwise");
932
933 if (src->is_single_cpu()) {
934 Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
935 if (type == T_OBJECT || type == T_ARRAY) {
// Oops are stored pointer-width and sanity-checked first.
936 __ verify_oop(src->as_register());
937 __ movptr (dst, src->as_register());
938 } else if (type == T_METADATA) {
939 __ movptr (dst, src->as_register());
940 } else {
// All other single-slot values are 32-bit stores.
941 __ movl (dst, src->as_register());
942 }
943
944 } else if (src->is_double_cpu()) {
945 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
946 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
947 __ movptr (dstLO, src->as_register_lo());
// On 32-bit only: the high word lives in a second register.
948 NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
949
950 } else if (src->is_single_xmm()) {
951 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
952 __ movflt(dst_addr, src->as_xmm_float_reg());
953
954 } else if (src->is_double_xmm()) {
955 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
961 if (pop_fpu_stack) __ fstp_s (dst_addr);
962 else __ fst_s (dst_addr);
963
964 } else if (src->is_double_fpu()) {
// x87 stores can only be done from the top of the FPU stack.
965 assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
966 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
967 if (pop_fpu_stack) __ fstp_d (dst_addr);
968 else __ fst_d (dst_addr);
969
970 } else {
971 ShouldNotReachHere();
972 }
973 }
974
975
// Store a register operand into memory at the LIR_Address `dest`, with
// optional field-access patching and implicit null-check bookkeeping.
// NOTE(review): the per-type store switch (original lines 1002-1095) is
// elided in this chunk; only its default arm is visible.
976 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
977 LIR_Address* to_addr = dest->as_address_ptr();
978 PatchingStub* patch = NULL;
979 Register compressed_src = rscratch1;
980
981 if (type == T_ARRAY || type == T_OBJECT) {
982 __ verify_oop(src->as_register());
983 #ifdef _LP64
// With compressed oops (and a non-wide store) the oop is narrowed into a
// scratch register first; record it in the oop map if patching is possible.
984 if (UseCompressedOops && !wide) {
985 __ movptr(compressed_src, src->as_register());
986 __ encode_heap_oop(compressed_src);
987 if (patch_code != lir_patch_none) {
988 info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
989 }
990 }
991 #endif
992 }
993
994 if (patch_code != lir_patch_none) {
995 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
996 Address toa = as_Address(to_addr);
997 assert(toa.disp() != 0, "must have");
998 }
999
// Remember the pc of the store so a SIGSEGV here maps to a Java NPE.
1000 int null_check_here = code_offset();
1001 switch (type) {
1096 break;
1097
1098 default:
1099 ShouldNotReachHere();
1100 }
1101 if (info != NULL) {
1102 add_debug_info_for_null_check(null_check_here, info);
1103 }
1104
1105 if (patch_code != lir_patch_none) {
1106 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1107 }
1108 }
1109
1110
1111 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1112 assert(src->is_stack(), "should not call otherwise");
1113 assert(dest->is_register(), "should not call otherwise");
1114
1115 if (dest->is_single_cpu()) {
1116 if (type == T_ARRAY || type == T_OBJECT) {
1117 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1118 __ verify_oop(dest->as_register());
1119 } else if (type == T_METADATA) {
1120 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1121 } else {
1122 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1123 }
1124
1125 } else if (dest->is_double_cpu()) {
1126 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1127 Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1128 __ movptr(dest->as_register_lo(), src_addr_LO);
1129 NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1130
1131 } else if (dest->is_single_xmm()) {
1132 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1133 __ movflt(dest->as_xmm_float_reg(), src_addr);
1134
1135 } else if (dest->is_double_xmm()) {
1136 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1137 __ movdbl(dest->as_xmm_double_reg(), src_addr);
1138
1139 } else if (dest->is_single_fpu()) {
1140 assert(dest->fpu_regnr() == 0, "dest must be TOS");
1141 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1142 __ fld_s(src_addr);
1143
1144 } else if (dest->is_double_fpu()) {
1145 assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1146 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1147 __ fld_d(src_addr);
1148
1149 } else {
1150 ShouldNotReachHere();
1151 }
1152 }
1153
1154
1155 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1156 if (src->is_single_stack()) {
1157 if (type == T_OBJECT || type == T_ARRAY) {
1158 __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1159 __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1160 } else {
1161 #ifndef _LP64
1162 __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1163 __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1164 #else
1165 //no pushl on 64bits
1166 __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1167 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1168 #endif
1169 }
1170
1171 } else if (src->is_double_stack()) {
1172 #ifdef _LP64
1173 __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1174 __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1175 #else
1176 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1177 // push and pop the part at src + wordSize, adding wordSize for the previous push
1338 case T_SHORT: {
1339 Register dest_reg = dest->as_register();
1340 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1341 __ movswl(dest_reg, from_addr);
1342 } else {
1343 __ movw(dest_reg, from_addr);
1344 __ shll(dest_reg, 16);
1345 __ sarl(dest_reg, 16);
1346 }
1347 break;
1348 }
1349
1350 default:
1351 ShouldNotReachHere();
1352 }
1353
1354 if (patch != NULL) {
1355 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1356 }
1357
1358 if (type == T_ARRAY || type == T_OBJECT) {
1359 #ifdef _LP64
1360 if (UseCompressedOops && !wide) {
1361 __ decode_heap_oop(dest->as_register());
1362 }
1363 #endif
1364
1365 // Load barrier has not yet been applied, so ZGC can't verify the oop here
1366 if (!UseZGC) {
1367 __ verify_oop(dest->as_register());
1368 }
1369 } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1370 #ifdef _LP64
1371 if (UseCompressedClassPointers) {
1372 __ decode_klass_not_null(dest->as_register());
1373 }
1374 #endif
1375 }
1376 }
1377
1378
1576 __ cmpb(Address(op->klass()->as_register(),
1577 InstanceKlass::init_state_offset()),
1578 InstanceKlass::fully_initialized);
1579 __ jcc(Assembler::notEqual, *op->stub()->entry());
1580 }
1581 __ allocate_object(op->obj()->as_register(),
1582 op->tmp1()->as_register(),
1583 op->tmp2()->as_register(),
1584 op->header_size(),
1585 op->object_size(),
1586 op->klass()->as_register(),
1587 *op->stub()->entry());
1588 __ bind(*op->stub()->continuation());
1589 }
1590
1591 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1592 Register len = op->len()->as_register();
1593 LP64_ONLY( __ movslq(len, len); )
1594
1595 if (UseSlowPath ||
1596 (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1597 (!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1598 __ jmp(*op->stub()->entry());
1599 } else {
1600 Register tmp1 = op->tmp1()->as_register();
1601 Register tmp2 = op->tmp2()->as_register();
1602 Register tmp3 = op->tmp3()->as_register();
1603 if (len == tmp1) {
1604 tmp1 = tmp3;
1605 } else if (len == tmp2) {
1606 tmp2 = tmp3;
1607 } else if (len == tmp3) {
1608 // everything is ok
1609 } else {
1610 __ mov(tmp3, len);
1611 }
1612 __ allocate_array(op->obj()->as_register(),
1613 len,
1614 tmp1,
1615 tmp2,
1616 arrayOopDesc::header_size(op->type()),
1617 array_element_size(op->type()),
2493 int r_hi = right->as_constant_ptr()->as_jint_hi();
2494 switch (code) {
2495 case lir_logic_and:
2496 __ andl(l_lo, r_lo);
2497 __ andl(l_hi, r_hi);
2498 break;
2499 case lir_logic_or:
2500 __ orl(l_lo, r_lo);
2501 __ orl(l_hi, r_hi);
2502 break;
2503 case lir_logic_xor:
2504 __ xorl(l_lo, r_lo);
2505 __ xorl(l_hi, r_hi);
2506 break;
2507 default: ShouldNotReachHere();
2508 }
2509 #endif // _LP64
2510 } else {
2511 #ifdef _LP64
2512 Register r_lo;
2513 if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
2514 r_lo = right->as_register();
2515 } else {
2516 r_lo = right->as_register_lo();
2517 }
2518 #else
2519 Register r_lo = right->as_register_lo();
2520 Register r_hi = right->as_register_hi();
2521 assert(l_lo != r_hi, "overwriting registers");
2522 #endif
2523 switch (code) {
2524 case lir_logic_and:
2525 __ andptr(l_lo, r_lo);
2526 NOT_LP64(__ andptr(l_hi, r_hi);)
2527 break;
2528 case lir_logic_or:
2529 __ orptr(l_lo, r_lo);
2530 NOT_LP64(__ orptr(l_hi, r_hi);)
2531 break;
2532 case lir_logic_xor:
2533 __ xorptr(l_lo, r_lo);
2606 move_regs(lreg, rax);
2607
2608 int idivl_offset = __ corrected_idivl(rreg);
2609 if (ImplicitDiv0Checks) {
2610 add_debug_info_for_div0(idivl_offset, info);
2611 }
2612 if (code == lir_irem) {
2613 move_regs(rdx, dreg); // result is in rdx
2614 } else {
2615 move_regs(rax, dreg);
2616 }
2617 }
2618 }
2619
2620
// Emit a compare between two LIR operands, setting the condition codes for a
// later branch/cmove. Handles cpu-register, stack, constant, address and fpu
// operand combinations.
// NOTE(review): original lines 2665-2723 (double-cpu and the xmm
// register-register/constant float branches) are elided in this chunk; the
// text resumes inside the double-xmm case.
2621 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2622 if (opr1->is_single_cpu()) {
2623 Register reg1 = opr1->as_register();
2624 if (opr2->is_single_cpu()) {
2625 // cpu register - cpu register
2626 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
// cmpoop handles compressed-oop comparison where needed.
2627 __ cmpoop(reg1, opr2->as_register());
2628 } else {
2629 assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
2630 __ cmpl(reg1, opr2->as_register());
2631 }
2632 } else if (opr2->is_stack()) {
2633 // cpu register - stack
2634 if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
2635 __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2636 } else {
2637 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2638 }
2639 } else if (opr2->is_constant()) {
2640 // cpu register - constant
2641 LIR_Const* c = opr2->as_constant_ptr();
2642 if (c->type() == T_INT) {
2643 __ cmpl(reg1, c->as_jint());
2644 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2645 // In 64bit oops are single register
2646 jobject o = c->as_jobject();
2647 if (o == NULL) {
// Comparing against null: a plain pointer compare suffices.
2648 __ cmpptr(reg1, (int32_t)NULL_WORD);
2649 } else {
2650 __ cmpoop(reg1, o);
2651 }
2652 } else {
2653 fatal("unexpected type: %s", basictype_to_str(c->type()));
2654 }
2655 // cpu register - address
2656 } else if (opr2->is_address()) {
2657 if (op->info() != NULL) {
// The memory operand may fault; record debug info for the implicit null check.
2658 add_debug_info_for_null_check_here(op->info());
2659 }
2660 __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2661 } else {
2662 ShouldNotReachHere();
2663 }
2664
2724 // xmm register - constant
2725 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2726 } else if (opr2->is_address()) {
2727 // xmm register - address
2728 if (op->info() != NULL) {
2729 add_debug_info_for_null_check_here(op->info());
2730 }
2731 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2732 } else {
2733 ShouldNotReachHere();
2734 }
2735
2736 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2737 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2738 assert(opr2->is_fpu_register(), "both must be registers");
// x87 compare against a stack-relative fpu register, popping per fpu_pop_count.
2739 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2740
2741 } else if (opr1->is_address() && opr2->is_constant()) {
2742 LIR_Const* c = opr2->as_constant_ptr();
2743 #ifdef _LP64
2744 if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
// Oop constants may not be directly encodable on LP64: materialize into a
// scratch register first, which reverses the operand order of the compare.
2745 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2746 __ movoop(rscratch1, c->as_jobject());
2747 }
2748 #endif // LP64
2749 if (op->info() != NULL) {
2750 add_debug_info_for_null_check_here(op->info());
2751 }
2752 // special case: address - constant
2753 LIR_Address* addr = opr1->as_address_ptr();
2754 if (c->type() == T_INT) {
2755 __ cmpl(as_Address(addr), c->as_jint());
2756 } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
2757 #ifdef _LP64
2758 // %%% Make this explode if addr isn't reachable until we figure out a
2759 // better strategy by giving noreg as the temp for as_Address
2760 __ cmpoop(rscratch1, as_Address(addr, noreg));
2761 #else
2762 __ cmpoop(as_Address(addr), c->as_jobject());
2763 #endif // _LP64
2764 } else {
2765 ShouldNotReachHere();
2766 }
2767
2768 } else {
2769 ShouldNotReachHere();
2770 }
2771 }
2772
2773 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2774 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2775 if (left->is_single_xmm()) {
2776 assert(right->is_single_xmm(), "must match");
3035
3036
3037 // This code replaces a call to arraycopy; no exception may
3038 // be thrown in this code, they must be thrown in the System.arraycopy
3039 // activation frame; we could save some checks if this would not be the case
3040 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3041 ciArrayKlass* default_type = op->expected_type();
3042 Register src = op->src()->as_register();
3043 Register dst = op->dst()->as_register();
3044 Register src_pos = op->src_pos()->as_register();
3045 Register dst_pos = op->dst_pos()->as_register();
3046 Register length = op->length()->as_register();
3047 Register tmp = op->tmp()->as_register();
3048
3049 __ resolve(ACCESS_READ, src);
3050 __ resolve(ACCESS_WRITE, dst);
3051
3052 CodeStub* stub = op->stub();
3053 int flags = op->flags();
3054 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3055 if (basic_type == T_ARRAY) basic_type = T_OBJECT;
3056
3057 // if we don't know anything, just go through the generic arraycopy
3058 if (default_type == NULL) {
3059 // save outgoing arguments on stack in case call to System.arraycopy is needed
3060 // HACK ALERT. This code used to push the parameters in a hardwired fashion
3061 // for interpreter calling conventions. Now we have to do it in new style conventions.
3062 // For the moment until C1 gets the new register allocator I just force all the
3063 // args to the right place (except the register args) and then on the back side
3064 // reload the register args properly if we go slow path. Yuck
3065
3066 // These are proper for the calling convention
3067 store_parameter(length, 2);
3068 store_parameter(dst_pos, 1);
3069 store_parameter(dst, 0);
3070
3071 // these are just temporary placements until we need to reload
3072 store_parameter(src_pos, 3);
3073 store_parameter(src, 4);
3074 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3075
|
1 /*
2 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
844 assert(src->is_register(), "should not call otherwise");
845 assert(dest->is_register(), "should not call otherwise");
846
847 // move between cpu-registers
848 if (dest->is_single_cpu()) {
849 #ifdef _LP64
850 if (src->type() == T_LONG) {
851 // Can do LONG -> OBJECT
852 move_regs(src->as_register_lo(), dest->as_register());
853 return;
854 }
855 #endif
856 assert(src->is_single_cpu(), "must match");
857 if (src->type() == T_OBJECT) {
858 __ verify_oop(src->as_register());
859 }
860 move_regs(src->as_register(), dest->as_register());
861
862 } else if (dest->is_double_cpu()) {
863 #ifdef _LP64
864 if (is_reference_type(src->type())) {
865 // Surprising to me but we can see move of a long to t_object
866 __ verify_oop(src->as_register());
867 move_regs(src->as_register(), dest->as_register_lo());
868 return;
869 }
870 #endif
871 assert(src->is_double_cpu(), "must match");
872 Register f_lo = src->as_register_lo();
873 Register f_hi = src->as_register_hi();
874 Register t_lo = dest->as_register_lo();
875 Register t_hi = dest->as_register_hi();
876 #ifdef _LP64
877 assert(f_hi == f_lo, "must be same");
878 assert(t_hi == t_lo, "must be same");
879 move_regs(f_lo, t_lo);
880 #else
881 assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
882
883
884 if (f_lo == t_hi && f_hi == t_lo) {
915 __ movflt(dest->as_xmm_float_reg(), src->as_xmm_float_reg());
916 } else if (dest->is_double_xmm()) {
917 assert(src->is_double_xmm(), "must match");
918 __ movdbl(dest->as_xmm_double_reg(), src->as_xmm_double_reg());
919
920 // move between fpu-registers (no instruction necessary because of fpu-stack)
921 } else if (dest->is_single_fpu() || dest->is_double_fpu()) {
922 assert(src->is_single_fpu() || src->is_double_fpu(), "must match");
923 assert(src->fpu() == dest->fpu(), "currently should be nothing to do");
924 } else {
925 ShouldNotReachHere();
926 }
927 }
928
// Spill register operand `src` into the stack slot `dest`, dispatching on the
// source register kind (GP single/pair, XMM, x87). `pop_fpu_stack` chooses
// fstp vs fst for the x87 cases.
// NOTE(review): original lines 956-960 (double_xmm store and single_fpu
// branch header) are elided in this chunk.
929 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
930 assert(src->is_register(), "should not call otherwise");
931 assert(dest->is_stack(), "should not call otherwise");
932
933 if (src->is_single_cpu()) {
934 Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
// is_reference_type covers both T_OBJECT and T_ARRAY.
935 if (is_reference_type(type)) {
936 __ verify_oop(src->as_register());
937 __ movptr (dst, src->as_register());
938 } else if (type == T_METADATA) {
939 __ movptr (dst, src->as_register());
940 } else {
941 __ movl (dst, src->as_register());
942 }
943
944 } else if (src->is_double_cpu()) {
945 Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
946 Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
947 __ movptr (dstLO, src->as_register_lo());
// High word only exists as a separate register on 32-bit.
948 NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
949
950 } else if (src->is_single_xmm()) {
951 Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
952 __ movflt(dst_addr, src->as_xmm_float_reg());
953
954 } else if (src->is_double_xmm()) {
955 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
961 if (pop_fpu_stack) __ fstp_s (dst_addr);
962 else __ fst_s (dst_addr);
963
964 } else if (src->is_double_fpu()) {
// x87 can only store from the top of the FPU stack.
965 assert(src->fpu_regnrLo() == 0, "argument must be on TOS");
966 Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
967 if (pop_fpu_stack) __ fstp_d (dst_addr);
968 else __ fst_d (dst_addr);
969
970 } else {
971 ShouldNotReachHere();
972 }
973 }
974
975
// Store a register operand to memory at `dest`, handling compressed-oop
// encoding, field-patching stubs and implicit null-check debug info.
// NOTE(review): the per-type store switch (original lines 1002-1095) is
// elided in this chunk.
976 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
977 LIR_Address* to_addr = dest->as_address_ptr();
978 PatchingStub* patch = NULL;
979 Register compressed_src = rscratch1;
980
981 if (is_reference_type(type)) {
982 __ verify_oop(src->as_register());
983 #ifdef _LP64
// Narrow the oop into a scratch register; if this store may be patched,
// record the narrow oop in the oop map.
984 if (UseCompressedOops && !wide) {
985 __ movptr(compressed_src, src->as_register());
986 __ encode_heap_oop(compressed_src);
987 if (patch_code != lir_patch_none) {
988 info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
989 }
990 }
991 #endif
992 }
993
994 if (patch_code != lir_patch_none) {
995 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
996 Address toa = as_Address(to_addr);
997 assert(toa.disp() != 0, "must have");
998 }
999
// pc of the store, so a fault here is reported as a Java null check.
1000 int null_check_here = code_offset();
1001 switch (type) {
1096 break;
1097
1098 default:
1099 ShouldNotReachHere();
1100 }
1101 if (info != NULL) {
1102 add_debug_info_for_null_check(null_check_here, info);
1103 }
1104
1105 if (patch_code != lir_patch_none) {
1106 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1107 }
1108 }
1109
1110
1111 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1112 assert(src->is_stack(), "should not call otherwise");
1113 assert(dest->is_register(), "should not call otherwise");
1114
1115 if (dest->is_single_cpu()) {
1116 if (is_reference_type(type)) {
1117 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1118 __ verify_oop(dest->as_register());
1119 } else if (type == T_METADATA) {
1120 __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1121 } else {
1122 __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
1123 }
1124
1125 } else if (dest->is_double_cpu()) {
1126 Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
1127 Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
1128 __ movptr(dest->as_register_lo(), src_addr_LO);
1129 NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
1130
1131 } else if (dest->is_single_xmm()) {
1132 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1133 __ movflt(dest->as_xmm_float_reg(), src_addr);
1134
1135 } else if (dest->is_double_xmm()) {
1136 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1137 __ movdbl(dest->as_xmm_double_reg(), src_addr);
1138
1139 } else if (dest->is_single_fpu()) {
1140 assert(dest->fpu_regnr() == 0, "dest must be TOS");
1141 Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1142 __ fld_s(src_addr);
1143
1144 } else if (dest->is_double_fpu()) {
1145 assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
1146 Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1147 __ fld_d(src_addr);
1148
1149 } else {
1150 ShouldNotReachHere();
1151 }
1152 }
1153
1154
1155 void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
1156 if (src->is_single_stack()) {
1157 if (is_reference_type(type)) {
1158 __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
1159 __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
1160 } else {
1161 #ifndef _LP64
1162 __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
1163 __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
1164 #else
1165 //no pushl on 64bits
1166 __ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
1167 __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
1168 #endif
1169 }
1170
1171 } else if (src->is_double_stack()) {
1172 #ifdef _LP64
1173 __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
1174 __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
1175 #else
1176 __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
1177 // push and pop the part at src + wordSize, adding wordSize for the previous push
1338 case T_SHORT: {
1339 Register dest_reg = dest->as_register();
1340 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1341 __ movswl(dest_reg, from_addr);
1342 } else {
1343 __ movw(dest_reg, from_addr);
1344 __ shll(dest_reg, 16);
1345 __ sarl(dest_reg, 16);
1346 }
1347 break;
1348 }
1349
1350 default:
1351 ShouldNotReachHere();
1352 }
1353
1354 if (patch != NULL) {
1355 patching_epilog(patch, patch_code, addr->base()->as_register(), info);
1356 }
1357
1358 if (is_reference_type(type)) {
1359 #ifdef _LP64
1360 if (UseCompressedOops && !wide) {
1361 __ decode_heap_oop(dest->as_register());
1362 }
1363 #endif
1364
1365 // Load barrier has not yet been applied, so ZGC can't verify the oop here
1366 if (!UseZGC) {
1367 __ verify_oop(dest->as_register());
1368 }
1369 } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1370 #ifdef _LP64
1371 if (UseCompressedClassPointers) {
1372 __ decode_klass_not_null(dest->as_register());
1373 }
1374 #endif
1375 }
1376 }
1377
1378
1576 __ cmpb(Address(op->klass()->as_register(),
1577 InstanceKlass::init_state_offset()),
1578 InstanceKlass::fully_initialized);
1579 __ jcc(Assembler::notEqual, *op->stub()->entry());
1580 }
1581 __ allocate_object(op->obj()->as_register(),
1582 op->tmp1()->as_register(),
1583 op->tmp2()->as_register(),
1584 op->header_size(),
1585 op->object_size(),
1586 op->klass()->as_register(),
1587 *op->stub()->entry());
1588 __ bind(*op->stub()->continuation());
1589 }
1590
1591 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1592 Register len = op->len()->as_register();
1593 LP64_ONLY( __ movslq(len, len); )
1594
1595 if (UseSlowPath ||
1596 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1597 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1598 __ jmp(*op->stub()->entry());
1599 } else {
1600 Register tmp1 = op->tmp1()->as_register();
1601 Register tmp2 = op->tmp2()->as_register();
1602 Register tmp3 = op->tmp3()->as_register();
1603 if (len == tmp1) {
1604 tmp1 = tmp3;
1605 } else if (len == tmp2) {
1606 tmp2 = tmp3;
1607 } else if (len == tmp3) {
1608 // everything is ok
1609 } else {
1610 __ mov(tmp3, len);
1611 }
1612 __ allocate_array(op->obj()->as_register(),
1613 len,
1614 tmp1,
1615 tmp2,
1616 arrayOopDesc::header_size(op->type()),
1617 array_element_size(op->type()),
2493 int r_hi = right->as_constant_ptr()->as_jint_hi();
2494 switch (code) {
2495 case lir_logic_and:
2496 __ andl(l_lo, r_lo);
2497 __ andl(l_hi, r_hi);
2498 break;
2499 case lir_logic_or:
2500 __ orl(l_lo, r_lo);
2501 __ orl(l_hi, r_hi);
2502 break;
2503 case lir_logic_xor:
2504 __ xorl(l_lo, r_lo);
2505 __ xorl(l_hi, r_hi);
2506 break;
2507 default: ShouldNotReachHere();
2508 }
2509 #endif // _LP64
2510 } else {
2511 #ifdef _LP64
2512 Register r_lo;
2513 if (is_reference_type(right->type())) {
2514 r_lo = right->as_register();
2515 } else {
2516 r_lo = right->as_register_lo();
2517 }
2518 #else
2519 Register r_lo = right->as_register_lo();
2520 Register r_hi = right->as_register_hi();
2521 assert(l_lo != r_hi, "overwriting registers");
2522 #endif
2523 switch (code) {
2524 case lir_logic_and:
2525 __ andptr(l_lo, r_lo);
2526 NOT_LP64(__ andptr(l_hi, r_hi);)
2527 break;
2528 case lir_logic_or:
2529 __ orptr(l_lo, r_lo);
2530 NOT_LP64(__ orptr(l_hi, r_hi);)
2531 break;
2532 case lir_logic_xor:
2533 __ xorptr(l_lo, r_lo);
2606 move_regs(lreg, rax);
2607
2608 int idivl_offset = __ corrected_idivl(rreg);
2609 if (ImplicitDiv0Checks) {
2610 add_debug_info_for_div0(idivl_offset, info);
2611 }
2612 if (code == lir_irem) {
2613 move_regs(rdx, dreg); // result is in rdx
2614 } else {
2615 move_regs(rax, dreg);
2616 }
2617 }
2618 }
2619
2620
// Emit the compare for a LIR cmp/branch/cmove: sets condition codes from the
// two operands, covering register, stack, constant, memory and fpu forms.
// NOTE(review): original lines 2665-2723 (double-cpu and part of the xmm
// branches) are elided in this chunk; text resumes in the double-xmm case.
2621 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
2622 if (opr1->is_single_cpu()) {
2623 Register reg1 = opr1->as_register();
2624 if (opr2->is_single_cpu()) {
2625 // cpu register - cpu register
2626 if (is_reference_type(opr1->type())) {
// cmpoop performs the (possibly compressed) oop comparison.
2627 __ cmpoop(reg1, opr2->as_register());
2628 } else {
2629 assert(!is_reference_type(opr2->type()), "cmp int, oop?");
2630 __ cmpl(reg1, opr2->as_register());
2631 }
2632 } else if (opr2->is_stack()) {
2633 // cpu register - stack
2634 if (is_reference_type(opr1->type())) {
2635 __ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2636 } else {
2637 __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
2638 }
2639 } else if (opr2->is_constant()) {
2640 // cpu register - constant
2641 LIR_Const* c = opr2->as_constant_ptr();
2642 if (c->type() == T_INT) {
2643 __ cmpl(reg1, c->as_jint());
2644 } else if (is_reference_type(c->type())) {
2645 // In 64bit oops are single register
2646 jobject o = c->as_jobject();
2647 if (o == NULL) {
// Null constant: a plain pointer compare is enough.
2648 __ cmpptr(reg1, (int32_t)NULL_WORD);
2649 } else {
2650 __ cmpoop(reg1, o);
2651 }
2652 } else {
2653 fatal("unexpected type: %s", basictype_to_str(c->type()));
2654 }
2655 // cpu register - address
2656 } else if (opr2->is_address()) {
2657 if (op->info() != NULL) {
// The memory operand may fault; attach null-check debug info here.
2658 add_debug_info_for_null_check_here(op->info());
2659 }
2660 __ cmpl(reg1, as_Address(opr2->as_address_ptr()));
2661 } else {
2662 ShouldNotReachHere();
2663 }
2664
2724 // xmm register - constant
2725 __ ucomisd(reg1, InternalAddress(double_constant(opr2->as_jdouble())));
2726 } else if (opr2->is_address()) {
2727 // xmm register - address
2728 if (op->info() != NULL) {
2729 add_debug_info_for_null_check_here(op->info());
2730 }
2731 __ ucomisd(reg1, as_Address(opr2->pointer()->as_address()));
2732 } else {
2733 ShouldNotReachHere();
2734 }
2735
2736 } else if(opr1->is_single_fpu() || opr1->is_double_fpu()) {
2737 assert(opr1->is_fpu_register() && opr1->fpu() == 0, "currently left-hand side must be on TOS (relax this restriction)");
2738 assert(opr2->is_fpu_register(), "both must be registers");
// x87 compare; pops zero/one/two entries as requested by fpu_pop_count.
2739 __ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
2740
2741 } else if (opr1->is_address() && opr2->is_constant()) {
2742 LIR_Const* c = opr2->as_constant_ptr();
2743 #ifdef _LP64
2744 if (is_reference_type(c->type())) {
// Oop constant may not be directly encodable on LP64: load it into a
// scratch register first; this flips the compare's operand order.
2745 assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
2746 __ movoop(rscratch1, c->as_jobject());
2747 }
2748 #endif // LP64
2749 if (op->info() != NULL) {
2750 add_debug_info_for_null_check_here(op->info());
2751 }
2752 // special case: address - constant
2753 LIR_Address* addr = opr1->as_address_ptr();
2754 if (c->type() == T_INT) {
2755 __ cmpl(as_Address(addr), c->as_jint());
2756 } else if (is_reference_type(c->type())) {
2757 #ifdef _LP64
2758 // %%% Make this explode if addr isn't reachable until we figure out a
2759 // better strategy by giving noreg as the temp for as_Address
2760 __ cmpoop(rscratch1, as_Address(addr, noreg));
2761 #else
2762 __ cmpoop(as_Address(addr), c->as_jobject());
2763 #endif // _LP64
2764 } else {
2765 ShouldNotReachHere();
2766 }
2767
2768 } else {
2769 ShouldNotReachHere();
2770 }
2771 }
2772
2773 void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
2774 if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
2775 if (left->is_single_xmm()) {
2776 assert(right->is_single_xmm(), "must match");
3035
3036
3037 // This code replaces a call to arraycopy; no exceptions may
3038 // be thrown in this code. They must be thrown in the System.arraycopy
3039 // activation frame instead; we could save some checks if this were not the case.
3040 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3041 ciArrayKlass* default_type = op->expected_type();
3042 Register src = op->src()->as_register();
3043 Register dst = op->dst()->as_register();
3044 Register src_pos = op->src_pos()->as_register();
3045 Register dst_pos = op->dst_pos()->as_register();
3046 Register length = op->length()->as_register();
3047 Register tmp = op->tmp()->as_register();
3048
3049 __ resolve(ACCESS_READ, src);
3050 __ resolve(ACCESS_WRITE, dst);
3051
3052 CodeStub* stub = op->stub();
3053 int flags = op->flags();
3054 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3055 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3056
3057 // if we don't know anything, just go through the generic arraycopy
3058 if (default_type == NULL) {
3059 // save outgoing arguments on stack in case call to System.arraycopy is needed
3060 // HACK ALERT. This code used to push the parameters in a hardwired fashion
3061 // for interpreter calling conventions. Now we have to do it in new style conventions.
3062 // For the moment until C1 gets the new register allocator I just force all the
3063 // args to the right place (except the register args) and then on the back side
3064 // reload the register args properly if we go slow path. Yuck
3065
3066 // These are proper for the calling convention
3067 store_parameter(length, 2);
3068 store_parameter(dst_pos, 1);
3069 store_parameter(dst, 0);
3070
3071 // these are just temporary placements until we need to reload
3072 store_parameter(src_pos, 3);
3073 store_parameter(src, 4);
3074 NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
3075
|