
src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp

rev 56376 : 8231448: s390 and ppc - replace JVM type comparisons to T_OBJECT and T_ARRAY with call to is_reference_type
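
The change is mechanical: each open-coded "type == T_OBJECT || type == T_ARRAY" test in this file is replaced by the shared is_reference_type() helper. As a sketch (paraphrasing the helper declared in src/hotspot/share/utilities/globalDefinitions.hpp, which is not part of this webrev), the pattern is:

  // Shared helper assumed by this patch (paraphrased sketch; see globalDefinitions.hpp).
  inline bool is_reference_type(BasicType t) {
    return (t == T_OBJECT || t == T_ARRAY);
  }

  // Typical rewrite in this file, e.g. in LIR_Assembler::stack2reg:
  //   before: if (type == T_ARRAY || type == T_OBJECT) { __ verify_oop(dest->as_register()); ... }
  //   after:  if (is_reference_type(type))             { __ verify_oop(dest->as_register()); ... }

Among the hunks shown here, the only structural change beyond the renaming is in the first hunk (the load path): the post-switch "if (type == T_ARRAY || type == T_OBJECT)" verify_oop check is dropped and the verify_oop call moves into the T_ARRAY/T_OBJECT case of the switch.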


 955                     __ z_ly(dest->as_register(),   disp_value, disp_reg, src);
 956       }
 957       break;
 958     case T_ADDRESS:
 959       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 960         __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
 961         __ decode_klass_not_null(dest->as_register());
 962       } else {
 963         __ z_lg(dest->as_register(), disp_value, disp_reg, src);
 964       }
 965       break;
 966     case T_ARRAY : // fall through
 967     case T_OBJECT:
 968     {
 969       if (UseCompressedOops && !wide) {
 970         __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
 971         __ oop_decoder(dest->as_register(), dest->as_register(), true);
 972       } else {
 973         __ z_lg(dest->as_register(), disp_value, disp_reg, src);
 974       }

 975       break;
 976     }
 977     case T_FLOAT:
 978       if (short_disp) {
 979                     __ z_le(dest->as_float_reg(),  disp_value, disp_reg, src);
 980       } else {
 981                     __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src);
 982       }
 983       break;
 984     case T_DOUBLE:
 985       if (short_disp) {
 986                     __ z_ld(dest->as_double_reg(),  disp_value, disp_reg, src);
 987       } else {
 988                     __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src);
 989       }
 990       break;
 991     case T_LONG  :  __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
 992     default      : ShouldNotReachHere();
 993   }
 994   if (type == T_ARRAY || type == T_OBJECT) {
 995     __ verify_oop(dest->as_register());
 996   }
 997 
 998   if (patch != NULL) {
 999     patching_epilog(patch, patch_code, src, info);
1000   }
1001   if (info != NULL) add_debug_info_for_null_check(offset, info);
1002 }
1003 
1004 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1005   assert(src->is_stack(), "should not call otherwise");
1006   assert(dest->is_register(), "should not call otherwise");
1007 
1008   if (dest->is_single_cpu()) {
1009     if (type == T_ARRAY || type == T_OBJECT) {
1010       __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
1011       __ verify_oop(dest->as_register());
1012     } else if (type == T_METADATA) {
1013       __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
1014     } else {
1015       __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false);
1016     }
1017   } else if (dest->is_double_cpu()) {
1018     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix());
1019     __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true);
1020   } else if (dest->is_single_fpu()) {
1021     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1022     __ mem2freg_opt(dest->as_float_reg(), src_addr, false);
1023   } else if (dest->is_double_fpu()) {
1024     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1025     __ mem2freg_opt(dest->as_double_reg(), src_addr, true);
1026   } else {
1027     ShouldNotReachHere();
1028   }
1029 }
1030 
1031 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
1032   assert(src->is_register(), "should not call otherwise");
1033   assert(dest->is_stack(), "should not call otherwise");
1034 
1035   if (src->is_single_cpu()) {
1036     const Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
1037     if (type == T_OBJECT || type == T_ARRAY) {
1038       __ verify_oop(src->as_register());
1039       __ reg2mem_opt(src->as_register(), dst, true);
1040     } else if (type == T_METADATA) {
1041       __ reg2mem_opt(src->as_register(), dst, true);
1042     } else {
1043       __ reg2mem_opt(src->as_register(), dst, false);
1044     }
1045   } else if (src->is_double_cpu()) {
1046     Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix());
1047     __ reg2mem_opt(src->as_register_lo(), dstLO, true);
1048   } else if (src->is_single_fpu()) {
1049     Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
1050     __ freg2mem_opt(src->as_float_reg(), dst_addr, false);
1051   } else if (src->is_double_fpu()) {
1052     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
1053     __ freg2mem_opt(src->as_double_reg(), dst_addr, true);
1054   } else {
1055     ShouldNotReachHere();
1056   }
1057 }


1063       assert(to_reg->is_double_fpu(), "should match");
1064       __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg());
1065     } else {
1066       // float to float moves
1067       assert(to_reg->is_single_fpu(), "should match");
1068       __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg());
1069     }
1070   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1071     if (from_reg->is_double_cpu()) {
1072       __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register());
1073     } else if (to_reg->is_double_cpu()) {
1074       // int to int moves
1075       __ z_lgr(to_reg->as_register_lo(), from_reg->as_register());
1076     } else {
1077       // int to int moves
1078       __ z_lgr(to_reg->as_register(), from_reg->as_register());
1079     }
1080   } else {
1081     ShouldNotReachHere();
1082   }
1083   if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
1084     __ verify_oop(to_reg->as_register());
1085   }
1086 }
1087 
1088 void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
1089                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1090                             bool wide, bool unaligned) {
1091   assert(type != T_METADATA, "store of metadata ptr not supported");
1092   LIR_Address* addr = dest_opr->as_address_ptr();
1093 
1094   Register dest = addr->base()->as_pointer_register();
1095   Register disp_reg = Z_R0;
1096   int disp_value = addr->disp();
1097   bool needs_patching = (patch_code != lir_patch_none);
1098 
1099   if (addr->base()->is_oop_register()) {
1100     __ verify_oop(dest);
1101   }
1102 
1103   PatchingStub* patch = NULL;


1114       if (needs_patching) {
1115         __ load_const(Z_R1_scratch, (intptr_t)0);
1116       } else {
1117         __ load_const_optimized(Z_R1_scratch, disp_value);
1118       }
1119       disp_reg = Z_R1_scratch;
1120       disp_value = 0;
1121     }
1122   } else {
1123     if (!Immediate::is_simm20(disp_value)) {
1124       __ load_const_optimized(Z_R1_scratch, disp_value);
1125       __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
1126       disp_reg = Z_R1_scratch;
1127       disp_value = 0;
1128     }
1129     disp_reg = addr->index()->as_pointer_register();
1130   }
1131 
1132   assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");
1133 
1134   if (type == T_ARRAY || type == T_OBJECT) {
1135     __ verify_oop(from->as_register());
1136   }
1137 
1138   bool short_disp = Immediate::is_uimm12(disp_value);
1139 
1140   // Remember the offset of the store. The patching_epilog must be done
1141   // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
1142   // entered in increasing order.
1143   int offset = code_offset();
1144   switch (type) {
1145     case T_BOOLEAN: // fall through
1146     case T_BYTE  :
1147       if (short_disp) {
1148                     __ z_stc(from->as_register(),  disp_value, disp_reg, dest);
1149       } else {
1150                     __ z_stcy(from->as_register(), disp_value, disp_reg, dest);
1151       }
1152       break;
1153     case T_CHAR  : // fall through
1154     case T_SHORT :


1277 
1278   __ set_inst_mark();
1279   AddressLiteral a((address)-1);
1280   success = success && __ load_const_from_toc(Z_R1, a);
1281   if (!success) {
1282     bailout("const section overflow");
1283     return;
1284   }
1285 
1286   __ z_br(Z_R1);
1287   assert(__ offset() - start <= call_stub_size(), "stub too big");
1288   __ end_a_stub(); // Update current stubs pointer and restore insts_end.
1289 }
1290 
1291 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1292   bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual;
1293   if (opr1->is_single_cpu()) {
1294     Register reg1 = opr1->as_register();
1295     if (opr2->is_single_cpu()) {
1296       // cpu register - cpu register
1297       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1298         __ z_clgr(reg1, opr2->as_register());
1299       } else {
1300         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
1301         if (unsigned_comp) {
1302           __ z_clr(reg1, opr2->as_register());
1303         } else {
1304           __ z_cr(reg1, opr2->as_register());
1305         }
1306       }
1307     } else if (opr2->is_stack()) {
1308       // cpu register - stack
1309       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1310         __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1311       } else {
1312         if (unsigned_comp) {
1313           __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1314         } else {
1315           __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1316         }
1317       }
1318     } else if (opr2->is_constant()) {
1319       // cpu register - constant
1320       LIR_Const* c = opr2->as_constant_ptr();
1321       if (c->type() == T_INT) {
1322         if (unsigned_comp) {
1323           __ z_clfi(reg1, c->as_jint());
1324         } else {
1325           __ z_cfi(reg1, c->as_jint());
1326         }
1327       } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
1328         // In 64bit oops are single register.
1329         jobject o = c->as_jobject();
1330         if (o == NULL) {
1331           __ z_ltgr(reg1, reg1);
1332         } else {
1333           jobject2reg(o, Z_R1_scratch);
1334           __ z_cgr(reg1, Z_R1_scratch);
1335         }
1336       } else {
1337         fatal("unexpected type: %s", basictype_to_str(c->type()));
1338       }
1339       // cpu register - address
1340     } else if (opr2->is_address()) {
1341       if (op->info() != NULL) {
1342         add_debug_info_for_null_check_here(op->info());
1343       }
1344       if (unsigned_comp) {
1345         __ z_cly(reg1, as_Address(opr2->as_address_ptr()));
1346       } else {
1347         __ z_cy(reg1, as_Address(opr2->as_address_ptr()));


1750     }
1751     move_regs(reg, dst->as_register());
1752   } else {
1753     Register l_lo = left->as_register_lo();
1754     if (right->is_constant()) {
1755       __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong());
1756       switch (code) {
1757         case lir_logic_and:
1758           __ z_ngr(l_lo, Z_R1_scratch);
1759           break;
1760         case lir_logic_or:
1761           __ z_ogr(l_lo, Z_R1_scratch);
1762           break;
1763         case lir_logic_xor:
1764           __ z_xgr(l_lo, Z_R1_scratch);
1765           break;
1766         default: ShouldNotReachHere();
1767       }
1768     } else {
1769       Register r_lo;
1770       if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
1771         r_lo = right->as_register();
1772       } else {
1773         r_lo = right->as_register_lo();
1774       }
1775       switch (code) {
1776         case lir_logic_and:
1777           __ z_ngr(l_lo, r_lo);
1778           break;
1779         case lir_logic_or:
1780           __ z_ogr(l_lo, r_lo);
1781           break;
1782         case lir_logic_xor:
1783           __ z_xgr(l_lo, r_lo);
1784           break;
1785         default: ShouldNotReachHere();
1786       }
1787     }
1788 
1789     Register dst_lo = dst->as_register_lo();
1790 


2396       __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
2397     }
2398     __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far.
2399   }
2400   __ allocate_object(op->obj()->as_register(),
2401                      op->tmp1()->as_register(),
2402                      op->tmp2()->as_register(),
2403                      op->header_size(),
2404                      op->object_size(),
2405                      op->klass()->as_register(),
2406                      *op->stub()->entry());
2407   __ bind(*op->stub()->continuation());
2408   __ verify_oop(op->obj()->as_register());
2409 }
2410 
2411 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2412   Register len = op->len()->as_register();
2413   __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend
2414 
2415   if (UseSlowPath ||
2416       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
2417       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
2418     __ z_brul(*op->stub()->entry());
2419   } else {
2420     __ allocate_array(op->obj()->as_register(),
2421                       op->len()->as_register(),
2422                       op->tmp1()->as_register(),
2423                       op->tmp2()->as_register(),
2424                       arrayOopDesc::header_size(op->type()),
2425                       type2aelembytes(op->type()),
2426                       op->klass()->as_register(),
2427                       *op->stub()->entry());
2428   }
2429   __ bind(*op->stub()->continuation());
2430 }
2431 
2432 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
2433                                         Register recv, Register tmp1, Label* update_done) {
2434   uint i;
2435   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2436     Label next_test;
2437     // See if the receiver is receiver[n].




 955                     __ z_ly(dest->as_register(),   disp_value, disp_reg, src);
 956       }
 957       break;
 958     case T_ADDRESS:
 959       if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 960         __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
 961         __ decode_klass_not_null(dest->as_register());
 962       } else {
 963         __ z_lg(dest->as_register(), disp_value, disp_reg, src);
 964       }
 965       break;
 966     case T_ARRAY : // fall through
 967     case T_OBJECT:
 968     {
 969       if (UseCompressedOops && !wide) {
 970         __ z_llgf(dest->as_register(), disp_value, disp_reg, src);
 971         __ oop_decoder(dest->as_register(), dest->as_register(), true);
 972       } else {
 973         __ z_lg(dest->as_register(), disp_value, disp_reg, src);
 974       }
 975       __ verify_oop(dest->as_register());
 976       break;
 977     }
 978     case T_FLOAT:
 979       if (short_disp) {
 980                     __ z_le(dest->as_float_reg(),  disp_value, disp_reg, src);
 981       } else {
 982                     __ z_ley(dest->as_float_reg(), disp_value, disp_reg, src);
 983       }
 984       break;
 985     case T_DOUBLE:
 986       if (short_disp) {
 987                     __ z_ld(dest->as_double_reg(),  disp_value, disp_reg, src);
 988       } else {
 989                     __ z_ldy(dest->as_double_reg(), disp_value, disp_reg, src);
 990       }
 991       break;
 992     case T_LONG  :  __ z_lg(dest->as_register_lo(), disp_value, disp_reg, src); break;
 993     default      : ShouldNotReachHere();
 994   }



 995 
 996   if (patch != NULL) {
 997     patching_epilog(patch, patch_code, src, info);
 998   }
 999   if (info != NULL) add_debug_info_for_null_check(offset, info);
1000 }
1001 
1002 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
1003   assert(src->is_stack(), "should not call otherwise");
1004   assert(dest->is_register(), "should not call otherwise");
1005 
1006   if (dest->is_single_cpu()) {
1007     if (is_reference_type(type)) {
1008       __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
1009       __ verify_oop(dest->as_register());
1010     } else if (type == T_METADATA) {
1011       __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), true);
1012     } else {
1013       __ mem2reg_opt(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()), false);
1014     }
1015   } else if (dest->is_double_cpu()) {
1016     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix());
1017     __ mem2reg_opt(dest->as_register_lo(), src_addr_LO, true);
1018   } else if (dest->is_single_fpu()) {
1019     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
1020     __ mem2freg_opt(dest->as_float_reg(), src_addr, false);
1021   } else if (dest->is_double_fpu()) {
1022     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
1023     __ mem2freg_opt(dest->as_double_reg(), src_addr, true);
1024   } else {
1025     ShouldNotReachHere();
1026   }
1027 }
1028 
1029 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
1030   assert(src->is_register(), "should not call otherwise");
1031   assert(dest->is_stack(), "should not call otherwise");
1032 
1033   if (src->is_single_cpu()) {
1034     const Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
1035     if (is_reference_type(type)) {
1036       __ verify_oop(src->as_register());
1037       __ reg2mem_opt(src->as_register(), dst, true);
1038     } else if (type == T_METADATA) {
1039       __ reg2mem_opt(src->as_register(), dst, true);
1040     } else {
1041       __ reg2mem_opt(src->as_register(), dst, false);
1042     }
1043   } else if (src->is_double_cpu()) {
1044     Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix());
1045     __ reg2mem_opt(src->as_register_lo(), dstLO, true);
1046   } else if (src->is_single_fpu()) {
1047     Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
1048     __ freg2mem_opt(src->as_float_reg(), dst_addr, false);
1049   } else if (src->is_double_fpu()) {
1050     Address dst_addr = frame_map()->address_for_slot(dest->double_stack_ix());
1051     __ freg2mem_opt(src->as_double_reg(), dst_addr, true);
1052   } else {
1053     ShouldNotReachHere();
1054   }
1055 }


1061       assert(to_reg->is_double_fpu(), "should match");
1062       __ z_ldr(to_reg->as_double_reg(), from_reg->as_double_reg());
1063     } else {
1064       // float to float moves
1065       assert(to_reg->is_single_fpu(), "should match");
1066       __ z_ler(to_reg->as_float_reg(), from_reg->as_float_reg());
1067     }
1068   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1069     if (from_reg->is_double_cpu()) {
1070       __ z_lgr(to_reg->as_pointer_register(), from_reg->as_pointer_register());
1071     } else if (to_reg->is_double_cpu()) {
1072       // int to int moves
1073       __ z_lgr(to_reg->as_register_lo(), from_reg->as_register());
1074     } else {
1075       // int to int moves
1076       __ z_lgr(to_reg->as_register(), from_reg->as_register());
1077     }
1078   } else {
1079     ShouldNotReachHere();
1080   }
1081   if (is_reference_type(to_reg->type())) {
1082     __ verify_oop(to_reg->as_register());
1083   }
1084 }
1085 
1086 void LIR_Assembler::reg2mem(LIR_Opr from, LIR_Opr dest_opr, BasicType type,
1087                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1088                             bool wide, bool unaligned) {
1089   assert(type != T_METADATA, "store of metadata ptr not supported");
1090   LIR_Address* addr = dest_opr->as_address_ptr();
1091 
1092   Register dest = addr->base()->as_pointer_register();
1093   Register disp_reg = Z_R0;
1094   int disp_value = addr->disp();
1095   bool needs_patching = (patch_code != lir_patch_none);
1096 
1097   if (addr->base()->is_oop_register()) {
1098     __ verify_oop(dest);
1099   }
1100 
1101   PatchingStub* patch = NULL;


1112       if (needs_patching) {
1113         __ load_const(Z_R1_scratch, (intptr_t)0);
1114       } else {
1115         __ load_const_optimized(Z_R1_scratch, disp_value);
1116       }
1117       disp_reg = Z_R1_scratch;
1118       disp_value = 0;
1119     }
1120   } else {
1121     if (!Immediate::is_simm20(disp_value)) {
1122       __ load_const_optimized(Z_R1_scratch, disp_value);
1123       __ z_la(Z_R1_scratch, 0, Z_R1_scratch, addr->index()->as_register());
1124       disp_reg = Z_R1_scratch;
1125       disp_value = 0;
1126     }
1127     disp_reg = addr->index()->as_pointer_register();
1128   }
1129 
1130   assert(disp_reg != Z_R0 || Immediate::is_simm20(disp_value), "should have set this up");
1131 
1132   if (is_reference_type(type)) {
1133     __ verify_oop(from->as_register());
1134   }
1135 
1136   bool short_disp = Immediate::is_uimm12(disp_value);
1137 
1138   // Remember the offset of the store. The patching_epilog must be done
1139   // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
1140   // entered in increasing order.
1141   int offset = code_offset();
1142   switch (type) {
1143     case T_BOOLEAN: // fall through
1144     case T_BYTE  :
1145       if (short_disp) {
1146                     __ z_stc(from->as_register(),  disp_value, disp_reg, dest);
1147       } else {
1148                     __ z_stcy(from->as_register(), disp_value, disp_reg, dest);
1149       }
1150       break;
1151     case T_CHAR  : // fall through
1152     case T_SHORT :


1275 
1276   __ set_inst_mark();
1277   AddressLiteral a((address)-1);
1278   success = success && __ load_const_from_toc(Z_R1, a);
1279   if (!success) {
1280     bailout("const section overflow");
1281     return;
1282   }
1283 
1284   __ z_br(Z_R1);
1285   assert(__ offset() - start <= call_stub_size(), "stub too big");
1286   __ end_a_stub(); // Update current stubs pointer and restore insts_end.
1287 }
1288 
1289 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1290   bool unsigned_comp = condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual;
1291   if (opr1->is_single_cpu()) {
1292     Register reg1 = opr1->as_register();
1293     if (opr2->is_single_cpu()) {
1294       // cpu register - cpu register
1295       if (is_reference_type(opr1->type())) {
1296         __ z_clgr(reg1, opr2->as_register());
1297       } else {
1298         assert(! is_reference_type(opr2->type()), "cmp int, oop?");
1299         if (unsigned_comp) {
1300           __ z_clr(reg1, opr2->as_register());
1301         } else {
1302           __ z_cr(reg1, opr2->as_register());
1303         }
1304       }
1305     } else if (opr2->is_stack()) {
1306       // cpu register - stack
1307       if (is_reference_type(opr1->type())) {
1308         __ z_cg(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1309       } else {
1310         if (unsigned_comp) {
1311           __ z_cly(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1312         } else {
1313           __ z_cy(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
1314         }
1315       }
1316     } else if (opr2->is_constant()) {
1317       // cpu register - constant
1318       LIR_Const* c = opr2->as_constant_ptr();
1319       if (c->type() == T_INT) {
1320         if (unsigned_comp) {
1321           __ z_clfi(reg1, c->as_jint());
1322         } else {
1323           __ z_cfi(reg1, c->as_jint());
1324         }
1325       } else if (is_reference_type(c->type())) {
1326         // In 64bit oops are single register.
1327         jobject o = c->as_jobject();
1328         if (o == NULL) {
1329           __ z_ltgr(reg1, reg1);
1330         } else {
1331           jobject2reg(o, Z_R1_scratch);
1332           __ z_cgr(reg1, Z_R1_scratch);
1333         }
1334       } else {
1335         fatal("unexpected type: %s", basictype_to_str(c->type()));
1336       }
1337       // cpu register - address
1338     } else if (opr2->is_address()) {
1339       if (op->info() != NULL) {
1340         add_debug_info_for_null_check_here(op->info());
1341       }
1342       if (unsigned_comp) {
1343         __ z_cly(reg1, as_Address(opr2->as_address_ptr()));
1344       } else {
1345         __ z_cy(reg1, as_Address(opr2->as_address_ptr()));


1748     }
1749     move_regs(reg, dst->as_register());
1750   } else {
1751     Register l_lo = left->as_register_lo();
1752     if (right->is_constant()) {
1753       __ load_const_optimized(Z_R1_scratch, right->as_constant_ptr()->as_jlong());
1754       switch (code) {
1755         case lir_logic_and:
1756           __ z_ngr(l_lo, Z_R1_scratch);
1757           break;
1758         case lir_logic_or:
1759           __ z_ogr(l_lo, Z_R1_scratch);
1760           break;
1761         case lir_logic_xor:
1762           __ z_xgr(l_lo, Z_R1_scratch);
1763           break;
1764         default: ShouldNotReachHere();
1765       }
1766     } else {
1767       Register r_lo;
1768       if (is_reference_type(right->type())) {
1769         r_lo = right->as_register();
1770       } else {
1771         r_lo = right->as_register_lo();
1772       }
1773       switch (code) {
1774         case lir_logic_and:
1775           __ z_ngr(l_lo, r_lo);
1776           break;
1777         case lir_logic_or:
1778           __ z_ogr(l_lo, r_lo);
1779           break;
1780         case lir_logic_xor:
1781           __ z_xgr(l_lo, r_lo);
1782           break;
1783         default: ShouldNotReachHere();
1784       }
1785     }
1786 
1787     Register dst_lo = dst->as_register_lo();
1788 


2394       __ z_cliy(state_offset, iklass, InstanceKlass::fully_initialized);
2395     }
2396     __ branch_optimized(Assembler::bcondNotEqual, *op->stub()->entry()); // Use long branch, because slow_case might be far.
2397   }
2398   __ allocate_object(op->obj()->as_register(),
2399                      op->tmp1()->as_register(),
2400                      op->tmp2()->as_register(),
2401                      op->header_size(),
2402                      op->object_size(),
2403                      op->klass()->as_register(),
2404                      *op->stub()->entry());
2405   __ bind(*op->stub()->continuation());
2406   __ verify_oop(op->obj()->as_register());
2407 }
2408 
2409 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2410   Register len = op->len()->as_register();
2411   __ move_reg_if_needed(len, T_LONG, len, T_INT); // sign extend
2412 
2413   if (UseSlowPath ||
2414       (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
2415       (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
2416     __ z_brul(*op->stub()->entry());
2417   } else {
2418     __ allocate_array(op->obj()->as_register(),
2419                       op->len()->as_register(),
2420                       op->tmp1()->as_register(),
2421                       op->tmp2()->as_register(),
2422                       arrayOopDesc::header_size(op->type()),
2423                       type2aelembytes(op->type()),
2424                       op->klass()->as_register(),
2425                       *op->stub()->entry());
2426   }
2427   __ bind(*op->stub()->continuation());
2428 }
2429 
2430 void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
2431                                         Register recv, Register tmp1, Label* update_done) {
2432   uint i;
2433   for (i = 0; i < VirtualCallData::row_limit(); i++) {
2434     Label next_test;
2435     // See if the receiver is receiver[n].

