
src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp

rev 56646 : 8231757: [ppc] Fix VerifyOops. Errors show up since 8231058.
Summary: Also make the checks print the offending value and the location where the failure occurred.
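
Note for context: the point of the change is that each verification call now carries a file/line string, so a failure report can name both the offending value and the place where the check was emitted. Below is a minimal standalone C++ sketch of that macro pattern, assuming nothing from the HotSpot sources; the names VERIFY_STR, VERIFY_XSTR and verify_value are invented for the illustration.

#include <cstdio>
#include <cstdint>

// Stringize __LINE__ in two steps so the macro argument is expanded first.
#define VERIFY_STR(x)  #x
#define VERIFY_XSTR(x) VERIFY_STR(x)
#define FILE_AND_LINE  __FILE__ ":" VERIFY_XSTR(__LINE__)

// Hypothetical checker: on failure it prints the offending value together
// with the call site that emitted the check.
static void verify_value(uintptr_t value, const char* where) {
  if (value == 0) {  // stand-in for a real sanity check on the value
    std::fprintf(stderr, "verify failed: value=0x%llx at %s\n",
                 (unsigned long long)value, where);
  }
}

int main() {
  verify_value(0, FILE_AND_LINE);  // the report names this file and line
  return 0;
}

The patched revision listed further down passes FILE_AND_LINE to verify_oop and verify_coop in the same spirit.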


 726     assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
 727     __ load_const_optimized(R0, offset);
 728     store_offset = store(from_reg, base, R0, type, wide);
 729   } else {
 730     store_offset = code_offset();
 731     switch (type) {
 732       case T_BOOLEAN: // fall through
 733       case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
 734       case T_CHAR  :
 735       case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
 736       case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
 737       case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
 738       case T_ADDRESS:
 739       case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
 740       case T_ARRAY : // fall through
 741       case T_OBJECT:
 742         {
 743           if (UseCompressedOops && !wide) {
 744             // Encoding done in caller
 745             __ stw(from_reg->as_register(), offset, base);

 746           } else {
 747             __ std(from_reg->as_register(), offset, base);

 748           }
 749           __ verify_oop(from_reg->as_register());
 750           break;
 751         }
 752       case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
 753       case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
 754       default      : ShouldNotReachHere();
 755     }
 756   }
 757   return store_offset;
 758 }
 759 
 760 
 761 // Attention: caller must encode oop if needed
 762 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
 763   int store_offset = code_offset();
 764   switch (type) {
 765     case T_BOOLEAN: // fall through
 766     case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
 767     case T_CHAR  :
 768     case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
 769     case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
 770     case T_LONG  :
 771 #ifdef _LP64
 772       __ stdx(from_reg->as_register_lo(), base, disp);
 773 #else
 774       Unimplemented();
 775 #endif
 776       break;
 777     case T_ADDRESS:
 778       __ stdx(from_reg->as_register(), base, disp);
 779       break;
 780     case T_ARRAY : // fall through
 781     case T_OBJECT:
 782       {
 783         if (UseCompressedOops && !wide) {
 784           // Encoding done in caller.
 785           __ stwx(from_reg->as_register(), base, disp);

 786         } else {
 787           __ stdx(from_reg->as_register(), base, disp);

 788         }
 789         __ verify_oop(from_reg->as_register()); // kills R0
 790         break;
 791       }
 792     case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
 793     case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
 794     default      : ShouldNotReachHere();
 795   }
 796   return store_offset;
 797 }
 798 
 799 
 800 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
 801   int load_offset;
 802   if (!Assembler::is_simm16(offset)) {
 803     // For offsets larger than a simm16 we setup the offset.
 804     __ load_const_optimized(R0, offset);
 805     load_offset = load(base, R0, to_reg, type, wide);
 806   } else {
 807     load_offset = code_offset();
 808     switch(type) {
 809       case T_BOOLEAN: // fall through


 814       case T_INT   :   __ lwa(to_reg->as_register(), offset, base); break;
 815       case T_LONG  :   __ ld(to_reg->as_register_lo(), offset, base); break;
 816       case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
 817       case T_ADDRESS:
 818         if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
 819           __ lwz(to_reg->as_register(), offset, base);
 820           __ decode_klass_not_null(to_reg->as_register());
 821         } else {
 822           __ ld(to_reg->as_register(), offset, base);
 823         }
 824         break;
 825       case T_ARRAY : // fall through
 826       case T_OBJECT:
 827         {
 828           if (UseCompressedOops && !wide) {
 829             __ lwz(to_reg->as_register(), offset, base);
 830             __ decode_heap_oop(to_reg->as_register());
 831           } else {
 832             __ ld(to_reg->as_register(), offset, base);
 833           }
 834           __ verify_oop(to_reg->as_register());


 835           break;
 836         }
 837       case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
 838       case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
 839       default      : ShouldNotReachHere();
 840     }
 841   }
 842   return load_offset;
 843 }
 844 
 845 
 846 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
 847   int load_offset = code_offset();
 848   switch(type) {
 849     case T_BOOLEAN: // fall through
 850     case T_BYTE  :  __ lbzx(to_reg->as_register(), base, disp);
 851                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
 852     case T_CHAR  :  __ lhzx(to_reg->as_register(), base, disp); break;
 853     case T_SHORT :  __ lhax(to_reg->as_register(), base, disp); break;
 854     case T_INT   :  __ lwax(to_reg->as_register(), base, disp); break;
 855     case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
 856     case T_ARRAY : // fall through
 857     case T_OBJECT:
 858       {
 859         if (UseCompressedOops && !wide) {
 860           __ lwzx(to_reg->as_register(), base, disp);
 861           __ decode_heap_oop(to_reg->as_register());
 862         } else {
 863           __ ldx(to_reg->as_register(), base, disp);
 864         }
 865         __ verify_oop(to_reg->as_register());


 866         break;
 867       }
 868     case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
 869     case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
 870     case T_LONG  :
 871 #ifdef _LP64
 872       __ ldx(to_reg->as_register_lo(), base, disp);
 873 #else
 874       Unimplemented();
 875 #endif
 876       break;
 877     default      : ShouldNotReachHere();
 878   }
 879   return load_offset;
 880 }
 881 
 882 
 883 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 884   LIR_Const* c = src->as_constant_ptr();
 885   Register src_reg = R0;


1124 
1125 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
1126                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
1127 
1128   assert(type != T_METADATA, "load of metadata ptr not supported");
1129   LIR_Address* addr = src_opr->as_address_ptr();
1130   LIR_Opr to_reg = dest;
1131 
1132   Register src = addr->base()->as_pointer_register();
1133   Register disp_reg = noreg;
1134   int disp_value = addr->disp();
1135   bool needs_patching = (patch_code != lir_patch_none);
1136   // null check for large offsets in LIRGenerator::do_LoadField
1137   bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;
1138 
1139   if (info != NULL && needs_explicit_null_check) {
1140     explicit_null_check(src, info);
1141   }
1142 
1143   if (addr->base()->type() == T_OBJECT) {
1144     __ verify_oop(src);
1145   }
1146 
1147   PatchingStub* patch = NULL;
1148   if (needs_patching) {
1149     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1150     assert(!to_reg->is_double_cpu() ||
1151            patch_code == lir_patch_none ||
1152            patch_code == lir_patch_normal, "patching doesn't match register");
1153   }
1154 
1155   if (addr->index()->is_illegal()) {
1156     if (!Assembler::is_simm16(disp_value)) {
1157       if (needs_patching) {
1158         __ load_const32(R0, 0); // patchable int
1159       } else {
1160         __ load_const_optimized(R0, disp_value);
1161       }
1162       disp_reg = R0;
1163     }
1164   } else {


1221       __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
1222     } else {
1223       // float to float moves
1224       assert(to_reg->is_single_fpu(), "should match");
1225       __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
1226     }
1227   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1228     if (from_reg->is_double_cpu()) {
1229       __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
1230     } else if (to_reg->is_double_cpu()) {
1231       // int to int moves
1232       __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
1233     } else {
1234       // int to int moves
1235       __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
1236     }
1237   } else {
1238     ShouldNotReachHere();
1239   }
1240   if (is_reference_type(to_reg->type())) {
1241     __ verify_oop(to_reg->as_register());
1242   }
1243 }
1244 
1245 
1246 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1247                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1248                             bool wide, bool unaligned) {
1249   assert(type != T_METADATA, "store of metadata ptr not supported");
1250   LIR_Address* addr = dest->as_address_ptr();
1251 
1252   Register src = addr->base()->as_pointer_register();
1253   Register disp_reg = noreg;
1254   int disp_value = addr->disp();
1255   bool needs_patching = (patch_code != lir_patch_none);
1256   bool compress_oop = (is_reference_type(type)) && UseCompressedOops && !wide &&
1257                       CompressedOops::mode() != CompressedOops::UnscaledNarrowOop;
1258   bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
1259   bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
1260   // Null check for large offsets in LIRGenerator::do_StoreField.
1261   bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;
1262 
1263   if (info != NULL && needs_explicit_null_check) {
1264     explicit_null_check(src, info);
1265   }
1266 
1267   if (addr->base()->is_oop_register()) {
1268     __ verify_oop(src);
1269   }
1270 
1271   PatchingStub* patch = NULL;
1272   if (needs_patching) {
1273     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1274     assert(!from_reg->is_double_cpu() ||
1275            patch_code == lir_patch_none ||
1276            patch_code == lir_patch_normal, "patching doesn't match register");
1277   }
1278 
1279   if (addr->index()->is_illegal()) {
1280     if (load_disp) {
1281       disp_reg = use_R29 ? R29_TOC : R0;
1282       if (needs_patching) {
1283         __ load_const32(disp_reg, 0); // patchable int
1284       } else {
1285         __ load_const_optimized(disp_reg, disp_value);
1286       }
1287     }
1288   } else {


2291     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2292       explicit_null_check(op->klass()->as_register(), op->stub()->info());
2293     } else {
2294       add_debug_info_for_null_check_here(op->stub()->info());
2295     }
2296     __ lbz(op->tmp1()->as_register(),
2297            in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
2298     __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
2299     __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
2300   }
2301   __ allocate_object(op->obj()->as_register(),
2302                      op->tmp1()->as_register(),
2303                      op->tmp2()->as_register(),
2304                      op->tmp3()->as_register(),
2305                      op->header_size(),
2306                      op->object_size(),
2307                      op->klass()->as_register(),
2308                      *op->stub()->entry());
2309 
2310   __ bind(*op->stub()->continuation());
2311   __ verify_oop(op->obj()->as_register());
2312 }
2313 
2314 
2315 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2316   LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
2317   if (UseSlowPath ||
2318       (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
2319       (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
2320     __ b(*op->stub()->entry());
2321   } else {
2322     __ allocate_array(op->obj()->as_register(),
2323                       op->len()->as_register(),
2324                       op->tmp1()->as_register(),
2325                       op->tmp2()->as_register(),
2326                       op->tmp3()->as_register(),
2327                       arrayOopDesc::header_size(op->type()),
2328                       type2aelembytes(op->type()),
2329                       op->klass()->as_register(),
2330                       *op->stub()->entry());
2331   }


2516     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2517     __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2518     __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
2519     __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2520   }
2521 
2522   __ bind(*failure);
2523 }
2524 
2525 
2526 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2527   LIR_Code code = op->code();
2528   if (code == lir_store_check) {
2529     Register value = op->object()->as_register();
2530     Register array = op->array()->as_register();
2531     Register k_RInfo = op->tmp1()->as_register();
2532     Register klass_RInfo = op->tmp2()->as_register();
2533     Register Rtmp1 = op->tmp3()->as_register();
2534     bool should_profile = op->should_profile();
2535 
2536     __ verify_oop(value);
2537     CodeStub* stub = op->stub();
2538     // Check if it needs to be profiled.
2539     ciMethodData* md = NULL;
2540     ciProfileData* data = NULL;
2541     int mdo_offset_bias = 0;
2542     if (should_profile) {
2543       ciMethod* method = op->profiled_method();
2544       assert(method != NULL, "Should have method");
2545       setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2546     }
2547     Label profile_cast_success, failure, done;
2548     Label *success_target = should_profile ? &profile_cast_success : &done;
2549 
2550     __ cmpdi(CCR0, value, 0);
2551     if (should_profile) {
2552       Label not_null;
2553       __ bne(CCR0, not_null);
2554       Register mdo      = k_RInfo;
2555       Register data_val = Rtmp1;
2556       metadata2reg(md->constant_encoding(), mdo);


3069 
3070 
3071 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
3072   Register obj = op->obj()->as_register();
3073   Register tmp = op->tmp()->as_pointer_register();
3074   LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
3075   ciKlass* exact_klass = op->exact_klass();
3076   intptr_t current_klass = op->current_klass();
3077   bool not_null = op->not_null();
3078   bool no_conflict = op->no_conflict();
3079 
3080   Label Lupdate, Ldo_update, Ldone;
3081 
3082   bool do_null = !not_null;
3083   bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
3084   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
3085 
3086   assert(do_null || do_update, "why are we here?");
3087   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
3088 
3089   __ verify_oop(obj);
3090 
3091   if (do_null) {
3092     if (!TypeEntries::was_null_seen(current_klass)) {
3093       __ cmpdi(CCR0, obj, 0);
3094       __ bne(CCR0, Lupdate);
3095       __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3096       __ ori(R0, R0, TypeEntries::null_seen);
3097       if (do_update) {
3098         __ b(Ldo_update);
3099       } else {
3100         __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3101       }
3102     } else {
3103       if (do_update) {
3104         __ cmpdi(CCR0, obj, 0);
3105         __ beq(CCR0, Ldone);
3106       }
3107     }
3108 #ifdef ASSERT
3109   } else {

(Original revision above; patched revision rev 56646 below.)

 726     assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
 727     __ load_const_optimized(R0, offset);
 728     store_offset = store(from_reg, base, R0, type, wide);
 729   } else {
 730     store_offset = code_offset();
 731     switch (type) {
 732       case T_BOOLEAN: // fall through
 733       case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
 734       case T_CHAR  :
 735       case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
 736       case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
 737       case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
 738       case T_ADDRESS:
 739       case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
 740       case T_ARRAY : // fall through
 741       case T_OBJECT:
 742         {
 743           if (UseCompressedOops && !wide) {
 744             // Encoding done in caller
 745             __ stw(from_reg->as_register(), offset, base);
 746             __ verify_coop(from_reg->as_register(), FILE_AND_LINE);
 747           } else {
 748             __ std(from_reg->as_register(), offset, base);
 749             __ verify_oop(from_reg->as_register(), FILE_AND_LINE);
 750           }

 751           break;
 752         }
 753       case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
 754       case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
 755       default      : ShouldNotReachHere();
 756     }
 757   }
 758   return store_offset;
 759 }
 760 
 761 
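A side note on the verify_coop/verify_oop split visible above: with compressed oops the register holds a 32-bit narrow oop, which has to be decoded before any pointer plausibility check, so the compressed path gets its own checker. The sketch below illustrates that decode-then-check idea in standalone C++; the names and the base-plus-shift encoding are assumptions made for the example, not the HotSpot implementation.

#include <cstdint>
#include <cassert>

// Assumed encoding parameters for the sketch (real ones come from the VM).
struct NarrowEncoding {
  uintptr_t base;   // heap base added during decode
  unsigned  shift;  // alignment shift applied during decode
};

// Decode a narrow oop into a full-width address; 0 stays 0 (null).
static uintptr_t decode_narrow(uint32_t narrow, const NarrowEncoding& enc) {
  return narrow == 0 ? 0 : enc.base + (uintptr_t(narrow) << enc.shift);
}

// Full-width check: accept null or an aligned address at/above the base.
static bool plausible_oop(uintptr_t p, const NarrowEncoding& enc) {
  return p == 0 || (p >= enc.base && (p & ((uintptr_t(1) << enc.shift) - 1)) == 0);
}

// Narrow check: decode first, then reuse the full-width check.
static bool plausible_narrow_oop(uint32_t n, const NarrowEncoding& enc) {
  return plausible_oop(decode_narrow(n, enc), enc);
}

int main() {
  NarrowEncoding enc{0x0000000080000000ull, 3};
  assert(plausible_narrow_oop(0x10u, enc));   // decodes to base + 0x80
  assert(plausible_oop(enc.base + 64, enc));  // already a full address
  assert(plausible_narrow_oop(0u, enc));      // null is always accepted
  return 0;
}

Keeping the narrow check as decode-plus-full-check keeps the two store paths consistent; in the patched code both variants additionally report the value and FILE_AND_LINE on failure, as described in the summary.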
 762 // Attention: caller must encode oop if needed
 763 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
 764   int store_offset = code_offset();
 765   switch (type) {
 766     case T_BOOLEAN: // fall through
 767     case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
 768     case T_CHAR  :
 769     case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
 770     case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
 771     case T_LONG  :
 772 #ifdef _LP64
 773       __ stdx(from_reg->as_register_lo(), base, disp);
 774 #else
 775       Unimplemented();
 776 #endif
 777       break;
 778     case T_ADDRESS:
 779       __ stdx(from_reg->as_register(), base, disp);
 780       break;
 781     case T_ARRAY : // fall through
 782     case T_OBJECT:
 783       {
 784         if (UseCompressedOops && !wide) {
 785           // Encoding done in caller.
 786           __ stwx(from_reg->as_register(), base, disp);
 787           __ verify_coop(from_reg->as_register(), FILE_AND_LINE); // kills R0
 788         } else {
 789           __ stdx(from_reg->as_register(), base, disp);
 790           __ verify_oop(from_reg->as_register(), FILE_AND_LINE); // kills R0
 791         }

 792         break;
 793       }
 794     case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
 795     case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
 796     default      : ShouldNotReachHere();
 797   }
 798   return store_offset;
 799 }
 800 
 801 
 802 int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
 803   int load_offset;
 804   if (!Assembler::is_simm16(offset)) {
 805     // For offsets larger than a simm16 we setup the offset.
 806     __ load_const_optimized(R0, offset);
 807     load_offset = load(base, R0, to_reg, type, wide);
 808   } else {
 809     load_offset = code_offset();
 810     switch(type) {
 811       case T_BOOLEAN: // fall through


 816       case T_INT   :   __ lwa(to_reg->as_register(), offset, base); break;
 817       case T_LONG  :   __ ld(to_reg->as_register_lo(), offset, base); break;
 818       case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
 819       case T_ADDRESS:
 820         if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
 821           __ lwz(to_reg->as_register(), offset, base);
 822           __ decode_klass_not_null(to_reg->as_register());
 823         } else {
 824           __ ld(to_reg->as_register(), offset, base);
 825         }
 826         break;
 827       case T_ARRAY : // fall through
 828       case T_OBJECT:
 829         {
 830           if (UseCompressedOops && !wide) {
 831             __ lwz(to_reg->as_register(), offset, base);
 832             __ decode_heap_oop(to_reg->as_register());
 833           } else {
 834             __ ld(to_reg->as_register(), offset, base);
 835           }
 836           // Emitting oop verification here makes the code exceed the
 837           // allowed size for PatchingStubs.
 838           // __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
 839           break;
 840         }
 841       case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
 842       case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
 843       default      : ShouldNotReachHere();
 844     }
 845   }
 846   return load_offset;
 847 }
 848 
 849 
 850 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
 851   int load_offset = code_offset();
 852   switch(type) {
 853     case T_BOOLEAN: // fall through
 854     case T_BYTE  :  __ lbzx(to_reg->as_register(), base, disp);
 855                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
 856     case T_CHAR  :  __ lhzx(to_reg->as_register(), base, disp); break;
 857     case T_SHORT :  __ lhax(to_reg->as_register(), base, disp); break;
 858     case T_INT   :  __ lwax(to_reg->as_register(), base, disp); break;
 859     case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
 860     case T_ARRAY : // fall through
 861     case T_OBJECT:
 862       {
 863         if (UseCompressedOops && !wide) {
 864           __ lwzx(to_reg->as_register(), base, disp);
 865           __ decode_heap_oop(to_reg->as_register());
 866         } else {
 867           __ ldx(to_reg->as_register(), base, disp);
 868         }
 869         // Emitting oop verification here makes the code exceed the
 870         // allowed size for PatchingStubs.
 871         //__ verify_oop(to_reg->as_register(), FILE_AND_LINE);
 872         break;
 873       }
 874     case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
 875     case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
 876     case T_LONG  :
 877 #ifdef _LP64
 878       __ ldx(to_reg->as_register_lo(), base, disp);
 879 #else
 880       Unimplemented();
 881 #endif
 882       break;
 883     default      : ShouldNotReachHere();
 884   }
 885   return load_offset;
 886 }
 887 
 888 
 889 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 890   LIR_Const* c = src->as_constant_ptr();
 891   Register src_reg = R0;


1130 
1131 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
1132                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
1133 
1134   assert(type != T_METADATA, "load of metadata ptr not supported");
1135   LIR_Address* addr = src_opr->as_address_ptr();
1136   LIR_Opr to_reg = dest;
1137 
1138   Register src = addr->base()->as_pointer_register();
1139   Register disp_reg = noreg;
1140   int disp_value = addr->disp();
1141   bool needs_patching = (patch_code != lir_patch_none);
1142   // null check for large offsets in LIRGenerator::do_LoadField
1143   bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;
1144 
1145   if (info != NULL && needs_explicit_null_check) {
1146     explicit_null_check(src, info);
1147   }
1148 
1149   if (addr->base()->type() == T_OBJECT) {
1150     __ verify_oop(src, FILE_AND_LINE);
1151   }
1152 
1153   PatchingStub* patch = NULL;
1154   if (needs_patching) {
1155     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1156     assert(!to_reg->is_double_cpu() ||
1157            patch_code == lir_patch_none ||
1158            patch_code == lir_patch_normal, "patching doesn't match register");
1159   }
1160 
1161   if (addr->index()->is_illegal()) {
1162     if (!Assembler::is_simm16(disp_value)) {
1163       if (needs_patching) {
1164         __ load_const32(R0, 0); // patchable int
1165       } else {
1166         __ load_const_optimized(R0, disp_value);
1167       }
1168       disp_reg = R0;
1169     }
1170   } else {


1227       __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
1228     } else {
1229       // float to float moves
1230       assert(to_reg->is_single_fpu(), "should match");
1231       __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
1232     }
1233   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1234     if (from_reg->is_double_cpu()) {
1235       __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
1236     } else if (to_reg->is_double_cpu()) {
1237       // int to int moves
1238       __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
1239     } else {
1240       // int to int moves
1241       __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
1242     }
1243   } else {
1244     ShouldNotReachHere();
1245   }
1246   if (is_reference_type(to_reg->type())) {
1247     __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
1248   }
1249 }
1250 
1251 
1252 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1253                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1254                             bool wide, bool unaligned) {
1255   assert(type != T_METADATA, "store of metadata ptr not supported");
1256   LIR_Address* addr = dest->as_address_ptr();
1257 
1258   Register src = addr->base()->as_pointer_register();
1259   Register disp_reg = noreg;
1260   int disp_value = addr->disp();
1261   bool needs_patching = (patch_code != lir_patch_none);
1262   bool compress_oop = (is_reference_type(type)) && UseCompressedOops && !wide &&
1263                       CompressedOops::mode() != CompressedOops::UnscaledNarrowOop;
1264   bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
1265   bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
1266   // Null check for large offsets in LIRGenerator::do_StoreField.
1267   bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;
1268 
1269   if (info != NULL && needs_explicit_null_check) {
1270     explicit_null_check(src, info);
1271   }
1272 
1273   if (addr->base()->is_oop_register()) {
1274     __ verify_oop(src, FILE_AND_LINE);
1275   }
1276 
1277   PatchingStub* patch = NULL;
1278   if (needs_patching) {
1279     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1280     assert(!from_reg->is_double_cpu() ||
1281            patch_code == lir_patch_none ||
1282            patch_code == lir_patch_normal, "patching doesn't match register");
1283   }
1284 
1285   if (addr->index()->is_illegal()) {
1286     if (load_disp) {
1287       disp_reg = use_R29 ? R29_TOC : R0;
1288       if (needs_patching) {
1289         __ load_const32(disp_reg, 0); // patchable int
1290       } else {
1291         __ load_const_optimized(disp_reg, disp_value);
1292       }
1293     }
1294   } else {


2297     if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
2298       explicit_null_check(op->klass()->as_register(), op->stub()->info());
2299     } else {
2300       add_debug_info_for_null_check_here(op->stub()->info());
2301     }
2302     __ lbz(op->tmp1()->as_register(),
2303            in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
2304     __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
2305     __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
2306   }
2307   __ allocate_object(op->obj()->as_register(),
2308                      op->tmp1()->as_register(),
2309                      op->tmp2()->as_register(),
2310                      op->tmp3()->as_register(),
2311                      op->header_size(),
2312                      op->object_size(),
2313                      op->klass()->as_register(),
2314                      *op->stub()->entry());
2315 
2316   __ bind(*op->stub()->continuation());
2317   __ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
2318 }
2319 
2320 
2321 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2322   LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
2323   if (UseSlowPath ||
2324       (!UseFastNewObjectArray && (is_reference_type(op->type()))) ||
2325       (!UseFastNewTypeArray   && (!is_reference_type(op->type())))) {
2326     __ b(*op->stub()->entry());
2327   } else {
2328     __ allocate_array(op->obj()->as_register(),
2329                       op->len()->as_register(),
2330                       op->tmp1()->as_register(),
2331                       op->tmp2()->as_register(),
2332                       op->tmp3()->as_register(),
2333                       arrayOopDesc::header_size(op->type()),
2334                       type2aelembytes(op->type()),
2335                       op->klass()->as_register(),
2336                       *op->stub()->entry());
2337   }


2522     __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
2523     __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2524     __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
2525     __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
2526   }
2527 
2528   __ bind(*failure);
2529 }
2530 
2531 
2532 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
2533   LIR_Code code = op->code();
2534   if (code == lir_store_check) {
2535     Register value = op->object()->as_register();
2536     Register array = op->array()->as_register();
2537     Register k_RInfo = op->tmp1()->as_register();
2538     Register klass_RInfo = op->tmp2()->as_register();
2539     Register Rtmp1 = op->tmp3()->as_register();
2540     bool should_profile = op->should_profile();
2541 
2542     __ verify_oop(value, FILE_AND_LINE);
2543     CodeStub* stub = op->stub();
2544     // Check if it needs to be profiled.
2545     ciMethodData* md = NULL;
2546     ciProfileData* data = NULL;
2547     int mdo_offset_bias = 0;
2548     if (should_profile) {
2549       ciMethod* method = op->profiled_method();
2550       assert(method != NULL, "Should have method");
2551       setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
2552     }
2553     Label profile_cast_success, failure, done;
2554     Label *success_target = should_profile ? &profile_cast_success : &done;
2555 
2556     __ cmpdi(CCR0, value, 0);
2557     if (should_profile) {
2558       Label not_null;
2559       __ bne(CCR0, not_null);
2560       Register mdo      = k_RInfo;
2561       Register data_val = Rtmp1;
2562       metadata2reg(md->constant_encoding(), mdo);


3075 
3076 
3077 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
3078   Register obj = op->obj()->as_register();
3079   Register tmp = op->tmp()->as_pointer_register();
3080   LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
3081   ciKlass* exact_klass = op->exact_klass();
3082   intptr_t current_klass = op->current_klass();
3083   bool not_null = op->not_null();
3084   bool no_conflict = op->no_conflict();
3085 
3086   Label Lupdate, Ldo_update, Ldone;
3087 
3088   bool do_null = !not_null;
3089   bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
3090   bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
3091 
3092   assert(do_null || do_update, "why are we here?");
3093   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
3094 
3095   __ verify_oop(obj, FILE_AND_LINE);
3096 
3097   if (do_null) {
3098     if (!TypeEntries::was_null_seen(current_klass)) {
3099       __ cmpdi(CCR0, obj, 0);
3100       __ bne(CCR0, Lupdate);
3101       __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3102       __ ori(R0, R0, TypeEntries::null_seen);
3103       if (do_update) {
3104         __ b(Ldo_update);
3105       } else {
3106         __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
3107       }
3108     } else {
3109       if (do_update) {
3110         __ cmpdi(CCR0, obj, 0);
3111         __ beq(CCR0, Ldone);
3112       }
3113     }
3114 #ifdef ASSERT
3115   } else {

