
src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp



   1 /*
   2  * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  90         // this works around a problem where moves with the same src and dst
  91         // end up in the delay slot and then the assembler swallows the mov
  92         // since it has no effect and then it complains because the delay slot
  93         // is empty.  returning false stops the optimizer from putting this in
  94         // the delay slot
  95         return false;
  96       }
  97 
  98       // don't put moves involving oops into the delay slot since the VerifyOops code
  99       // will make it much larger than a single instruction.
 100       if (VerifyOops) {
 101         return false;
 102       }
 103 
 104       if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
 105           ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
 106         return false;
 107       }
 108 
 109       if (UseCompressedOops) {
 110         if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
 111         if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
 112       }
 113 
 114       if (UseCompressedClassPointers) {
 115         if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
 116             src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
 117       }
 118 
 119       if (dst->is_register()) {
 120         if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
 121           return !PatchALot;
 122         } else if (src->is_single_stack()) {
 123           return true;
 124         }
 125       }
 126 
 127       if (src->is_register()) {
 128         if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
 129           return !PatchALot;
 130         } else if (dst->is_single_stack()) {
 131           return true;
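
The predicate above decides whether a move is compact enough to be hoisted into a branch delay slot: anything that may expand to more than one instruction (oop verification, patching, double-word moves, compressed-oop handling, or a displacement that does not fit a simm13 immediate) is rejected. A minimal sketch of the range test, assuming Assembler::is_simm13 checks a signed 13-bit immediate as on SPARC:

#include <cstdint>

// Sketch (assumption): a simm13 operand is a signed 13-bit immediate,
// so a displacement only fits if it lies in [-4096, 4095].
static bool fits_simm13(intptr_t x) {
  return x >= -4096 && x <= 4095;
}
// Displacements outside this range need an extra instruction to build
// the offset in a register, so such a move cannot fill the delay slot.
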


 711   } else {
 712     // This will generate 2 instructions
 713     __ set(op->vtable_offset(), G5_method);
 714     // ld_ptr, set_hi, set
 715     __ ld_ptr(G3_scratch, G5_method, G5_method);
 716   }
 717   __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
 718   __ callr(G3_scratch, G0);
 719   // the peephole pass fills the delay slot
 720 }
 721 
 722 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
 723   int store_offset;
 724   if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
 725     assert(base != O7, "destroying register");
 726     assert(!unaligned, "can't handle this");
 727     // for offsets larger than a simm13 we setup the offset in O7
 728     __ set(offset, O7);
 729     store_offset = store(from_reg, base, O7, type, wide);
 730   } else {
 731     if (type == T_ARRAY || type == T_OBJECT) {
 732       __ verify_oop(from_reg->as_register());
 733     }
 734     store_offset = code_offset();
 735     switch (type) {
 736       case T_BOOLEAN: // fall through
 737       case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
 738       case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
 739       case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
 740       case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
 741       case T_LONG  :
 742         if (unaligned || PatchALot) {
 743           // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
 744           assert(G3_scratch != base, "can't handle this");
 745           assert(G3_scratch != from_reg->as_register_lo(), "can't handle this");
 746           __ srax(from_reg->as_register_lo(), 32, G3_scratch);
 747           __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
 748           __ stw(G3_scratch,                 base, offset + hi_word_offset_in_bytes);
 749         } else {
 750           __ stx(from_reg->as_register_lo(), base, offset);
 751         }
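
One detail worth flagging in the guard at the top of this store() overload: in C++ the conditional operator binds looser than '+', so the expression offset + (type == T_LONG) ? wordSize : 0 parses as (offset + (type == T_LONG)) ? wordSize : 0, meaning the value handed to Assembler::is_simm13 is always wordSize or 0 rather than the offset itself. A small self-contained illustration of the two groupings (the "intended" variant is an assumption about the original intent):

#include <cassert>

// How the expression actually parses vs. the presumably intended grouping.
int actual_operand(int offset, bool is_long, int word_size) {
  return (offset + is_long) ? word_size : 0;      // '+' binds tighter than '?:'
}
int intended_operand(int offset, bool is_long, int word_size) {
  return offset + (is_long ? word_size : 0);      // likely intent
}
int main() {
  assert(actual_operand(100000, true, 8) == 8);        // large offset collapses to word_size
  assert(intended_operand(100000, true, 8) == 100008); // offset survives the range check
  return 0;
}
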


 772         {
 773           FloatRegister reg = from_reg->as_double_reg();
 774           // split unaligned stores
 775           if (unaligned || PatchALot) {
 776             assert(Assembler::is_simm13(offset + 4), "must be");
 777             __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
 778             __ stf(FloatRegisterImpl::S, reg,              base, offset);
 779           } else {
 780             __ stf(FloatRegisterImpl::D, reg, base, offset);
 781           }
 782           break;
 783         }
 784       default      : ShouldNotReachHere();
 785     }
 786   }
 787   return store_offset;
 788 }
 789 
 790 
 791 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
 792   if (type == T_ARRAY || type == T_OBJECT) {
 793     __ verify_oop(from_reg->as_register());
 794   }
 795   int store_offset = code_offset();
 796   switch (type) {
 797     case T_BOOLEAN: // fall through
 798     case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
 799     case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
 800     case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
 801     case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
 802     case T_LONG  :
 803       __ stx(from_reg->as_register_lo(), base, disp);
 804       break;
 805     case T_ADDRESS:
 806       __ st_ptr(from_reg->as_register(), base, disp);
 807       break;
 808     case T_ARRAY : // fall through
 809     case T_OBJECT:
 810       {
 811         if (UseCompressedOops && !wide) {
 812           __ encode_heap_oop(from_reg->as_register(), G3_scratch);
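
When UseCompressedOops is on and the store is not wide, the reference is first narrowed with encode_heap_oop and then written with a 32-bit store. A conceptual sketch of the narrowing, assuming the usual base-and-shift scheme chosen at VM startup (the exact base and shift are runtime-dependent):

#include <cstdint>

// Sketch (assumption): a compressed oop is (address - heap_base) >> shift,
// decoded back as heap_base + (narrow << shift).
static uint32_t encode_oop_sketch(uint64_t addr, uint64_t heap_base, unsigned shift) {
  return (uint32_t)((addr - heap_base) >> shift);
}
static uint64_t decode_oop_sketch(uint32_t narrow, uint64_t heap_base, unsigned shift) {
  return heap_base + ((uint64_t)narrow << shift);
}
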


 872           } else {
 873             __ ld_ptr(base, offset, to_reg->as_register());
 874           }
 875           break;
 876         }
 877       case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
 878       case T_DOUBLE:
 879         {
 880           FloatRegister reg = to_reg->as_double_reg();
 881           // split unaligned loads
 882           if (unaligned || PatchALot) {
 883             __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
 884             __ ldf(FloatRegisterImpl::S, base, offset,     reg);
 885           } else {
 886             __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
 887           }
 888           break;
 889         }
 890       default      : ShouldNotReachHere();
 891     }
 892     if (type == T_ARRAY || type == T_OBJECT) {
 893       __ verify_oop(to_reg->as_register());
 894     }
 895   }
 896   return load_offset;
 897 }
 898 
 899 
 900 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
 901   int load_offset = code_offset();
 902   switch(type) {
 903     case T_BOOLEAN: // fall through
 904     case T_BYTE  :  __ ldsb(base, disp, to_reg->as_register()); break;
 905     case T_CHAR  :  __ lduh(base, disp, to_reg->as_register()); break;
 906     case T_SHORT :  __ ldsh(base, disp, to_reg->as_register()); break;
 907     case T_INT   :  __ ld(base, disp, to_reg->as_register()); break;
 908     case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
 909     case T_ARRAY : // fall through
 910     case T_OBJECT:
 911       {
 912           if (UseCompressedOops && !wide) {
 913             __ lduw(base, disp, to_reg->as_register());
 914             __ decode_heap_oop(to_reg->as_register());
 915           } else {
 916             __ ld_ptr(base, disp, to_reg->as_register());
 917           }
 918           break;
 919       }
 920     case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
 921     case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
 922     case T_LONG  :
 923       __ ldx(base, disp, to_reg->as_register_lo());
 924       break;
 925     default      : ShouldNotReachHere();
 926   }
 927   if (type == T_ARRAY || type == T_OBJECT) {
 928     __ verify_oop(to_reg->as_register());
 929   }
 930   return load_offset;
 931 }
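
Note the asymmetry between the 16-bit cases in the load paths: T_CHAR uses lduh while T_SHORT uses ldsh, because a Java char is an unsigned 16-bit value and a short is signed, so widening to a full register must zero-extend in one case and sign-extend in the other (stores need no such distinction, which is why both use sth). A small sketch of the difference:

#include <cstdint>

// Zero-extension (like lduh, for char) vs. sign-extension (like ldsh, for short).
int32_t widen_char (uint16_t v) { return (int32_t)(uint32_t)v; }
int32_t widen_short(int16_t  v) { return (int32_t)v; }
// widen_char(0xFFFF) == 65535, while widen_short((int16_t)0xFFFF) == -1.
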
 932 
 933 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 934   LIR_Const* c = src->as_constant_ptr();
 935   switch (c->type()) {
 936     case T_INT:
 937     case T_FLOAT: {
 938       Register src_reg = O7;
 939       int value = c->as_jint_bits();
 940       if (value == 0) {
 941         src_reg = G0;
 942       } else {
 943         __ set(value, O7);
 944       }
 945       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
 946       __ stw(src_reg, addr.base(), addr.disp());
 947       break;
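
const2stack handles T_INT and T_FLOAT constants through the same path because only the 32-bit bit pattern matters for a stack slot: as_jint_bits() presumably returns that raw pattern for either type, and when it is zero the store can come straight from G0, SPARC's hardwired zero register, skipping the set that would otherwise materialize the constant in O7. A sketch of the reinterpretation, with jint_bits_of as a hypothetical stand-in for as_jint_bits():

#include <cstdint>
#include <cstring>

// Reinterpret a float's 32-bit pattern without numeric conversion.
static int32_t jint_bits_of(float f) {
  int32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return bits;
}
// jint_bits_of(0.0f) == 0, so the zero register can be stored directly.
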


1342       assert(to_reg->is_double_fpu(), "should match");
1343       __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
1344     } else {
1345       // float to float moves
1346       assert(to_reg->is_single_fpu(), "should match");
1347       __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
1348     }
1349   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1350     if (from_reg->is_double_cpu()) {
1351       __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
1352     } else if (to_reg->is_double_cpu()) {
1353       // int to int moves
1354       __ mov(from_reg->as_register(), to_reg->as_register_lo());
1355     } else {
1356       // int to int moves
1357       __ mov(from_reg->as_register(), to_reg->as_register());
1358     }
1359   } else {
1360     ShouldNotReachHere();
1361   }
1362   if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
1363     __ verify_oop(to_reg->as_register());
1364   }
1365 }
1366 
1367 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1368                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1369                             bool wide, bool unaligned) {
1370   assert(type != T_METADATA, "store of metadata ptr not supported");
1371   LIR_Address* addr = dest->as_address_ptr();
1372 
1373   Register src = addr->base()->as_pointer_register();
1374   Register disp_reg = noreg;
1375   int disp_value = addr->disp();
1376   bool needs_patching = (patch_code != lir_patch_none);
1377 
1378   if (addr->base()->is_oop_register()) {
1379     __ verify_oop(src);
1380   }
1381 
1382   PatchingStub* patch = NULL;


2278                      op->tmp2()->as_register(),
2279                      op->tmp3()->as_register(),
2280                      op->header_size(),
2281                      op->object_size(),
2282                      op->klass()->as_register(),
2283                      *op->stub()->entry());
2284   __ bind(*op->stub()->continuation());
2285   __ verify_oop(op->obj()->as_register());
2286 }
2287 
2288 
2289 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2290   assert(op->tmp1()->as_register()  == G1 &&
2291          op->tmp2()->as_register()  == G3 &&
2292          op->tmp3()->as_register()  == G4 &&
2293          op->tmp4()->as_register()  == O1 &&
2294          op->klass()->as_register() == G5, "must be");
2295 
2296   __ signx(op->len()->as_register());
2297   if (UseSlowPath ||
2298       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
2299       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
2300     __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2301     __ delayed()->nop();
2302   } else {
2303     __ allocate_array(op->obj()->as_register(),
2304                       op->len()->as_register(),
2305                       op->tmp1()->as_register(),
2306                       op->tmp2()->as_register(),
2307                       op->tmp3()->as_register(),
2308                       arrayOopDesc::header_size(op->type()),
2309                       type2aelembytes(op->type()),
2310                       op->klass()->as_register(),
2311                       *op->stub()->entry());
2312   }
2313   __ bind(*op->stub()->continuation());
2314 }
2315 
2316 
2317 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
2318                                         ciMethodData *md, ciProfileData *data,
2319                                         Register recv, Register tmp1, Label* update_done) {


   1 /*
   2  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  90         // this works around a problem where moves with the same src and dst
  91         // end up in the delay slot and then the assembler swallows the mov
  92         // since it has no effect and then it complains because the delay slot
  93         // is empty.  returning false stops the optimizer from putting this in
  94         // the delay slot
  95         return false;
  96       }
  97 
  98       // don't put moves involving oops into the delay slot since the VerifyOops code
  99       // will make it much larger than a single instruction.
 100       if (VerifyOops) {
 101         return false;
 102       }
 103 
 104       if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
 105           ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
 106         return false;
 107       }
 108 
 109       if (UseCompressedOops) {
 110         if (dst->is_address() && !dst->is_stack() && is_reference_type(dst->type())) return false;
 111         if (src->is_address() && !src->is_stack() && is_reference_type(src->type())) return false;
 112       }
 113 
 114       if (UseCompressedClassPointers) {
 115         if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
 116             src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
 117       }
 118 
 119       if (dst->is_register()) {
 120         if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
 121           return !PatchALot;
 122         } else if (src->is_single_stack()) {
 123           return true;
 124         }
 125       }
 126 
 127       if (src->is_register()) {
 128         if (dst->is_address() && Assembler::is_simm13(dst->as_address_ptr()->disp())) {
 129           return !PatchALot;
 130         } else if (dst->is_single_stack()) {
 131           return true;
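
This second copy of the file is the updated revision (note the 2019 copyright year); the substantive change in this hunk, and in the ones that follow, is that the explicit (type == T_OBJECT || type == T_ARRAY) tests are folded into is_reference_type. Assuming the helper matches its name, it is presumably equivalent to:

// Presumed shape of the helper (relies on the existing BasicType enum):
inline bool is_reference_type(BasicType t) {
  return (t == T_OBJECT || t == T_ARRAY);
}

Under that assumption the delay-slot, store/load, and allocation paths stay behaviorally unchanged while the reference check becomes uniform.
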


 711   } else {
 712     // This will generate 2 instructions
 713     __ set(op->vtable_offset(), G5_method);
 714     // ld_ptr, set_hi, set
 715     __ ld_ptr(G3_scratch, G5_method, G5_method);
 716   }
 717   __ ld_ptr(G5_method, Method::from_compiled_offset(), G3_scratch);
 718   __ callr(G3_scratch, G0);
 719   // the peephole pass fills the delay slot
 720 }
 721 
 722 int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
 723   int store_offset;
 724   if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
 725     assert(base != O7, "destroying register");
 726     assert(!unaligned, "can't handle this");
 727     // for offsets larger than a simm13 we setup the offset in O7
 728     __ set(offset, O7);
 729     store_offset = store(from_reg, base, O7, type, wide);
 730   } else {
 731     if (is_reference_type(type)) {
 732       __ verify_oop(from_reg->as_register());
 733     }
 734     store_offset = code_offset();
 735     switch (type) {
 736       case T_BOOLEAN: // fall through
 737       case T_BYTE  : __ stb(from_reg->as_register(), base, offset); break;
 738       case T_CHAR  : __ sth(from_reg->as_register(), base, offset); break;
 739       case T_SHORT : __ sth(from_reg->as_register(), base, offset); break;
 740       case T_INT   : __ stw(from_reg->as_register(), base, offset); break;
 741       case T_LONG  :
 742         if (unaligned || PatchALot) {
 743           // Don't use O7 here because it may be equal to 'base' (see LIR_Assembler::reg2mem)
 744           assert(G3_scratch != base, "can't handle this");
 745           assert(G3_scratch != from_reg->as_register_lo(), "can't handle this");
 746           __ srax(from_reg->as_register_lo(), 32, G3_scratch);
 747           __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
 748           __ stw(G3_scratch,                 base, offset + hi_word_offset_in_bytes);
 749         } else {
 750           __ stx(from_reg->as_register_lo(), base, offset);
 751         }


 772         {
 773           FloatRegister reg = from_reg->as_double_reg();
 774           // split unaligned stores
 775           if (unaligned || PatchALot) {
 776             assert(Assembler::is_simm13(offset + 4), "must be");
 777             __ stf(FloatRegisterImpl::S, reg->successor(), base, offset + 4);
 778             __ stf(FloatRegisterImpl::S, reg,              base, offset);
 779           } else {
 780             __ stf(FloatRegisterImpl::D, reg, base, offset);
 781           }
 782           break;
 783         }
 784       default      : ShouldNotReachHere();
 785     }
 786   }
 787   return store_offset;
 788 }
 789 
 790 
 791 int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
 792   if (is_reference_type(type)) {
 793     __ verify_oop(from_reg->as_register());
 794   }
 795   int store_offset = code_offset();
 796   switch (type) {
 797     case T_BOOLEAN: // fall through
 798     case T_BYTE  : __ stb(from_reg->as_register(), base, disp); break;
 799     case T_CHAR  : __ sth(from_reg->as_register(), base, disp); break;
 800     case T_SHORT : __ sth(from_reg->as_register(), base, disp); break;
 801     case T_INT   : __ stw(from_reg->as_register(), base, disp); break;
 802     case T_LONG  :
 803       __ stx(from_reg->as_register_lo(), base, disp);
 804       break;
 805     case T_ADDRESS:
 806       __ st_ptr(from_reg->as_register(), base, disp);
 807       break;
 808     case T_ARRAY : // fall through
 809     case T_OBJECT:
 810       {
 811         if (UseCompressedOops && !wide) {
 812           __ encode_heap_oop(from_reg->as_register(), G3_scratch);


 872           } else {
 873             __ ld_ptr(base, offset, to_reg->as_register());
 874           }
 875           break;
 876         }
 877       case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
 878       case T_DOUBLE:
 879         {
 880           FloatRegister reg = to_reg->as_double_reg();
 881           // split unaligned loads
 882           if (unaligned || PatchALot) {
 883             __ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
 884             __ ldf(FloatRegisterImpl::S, base, offset,     reg);
 885           } else {
 886             __ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
 887           }
 888           break;
 889         }
 890       default      : ShouldNotReachHere();
 891     }
 892     if (is_reference_type(type)) {
 893       __ verify_oop(to_reg->as_register());
 894     }
 895   }
 896   return load_offset;
 897 }
 898 
 899 
 900 int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
 901   int load_offset = code_offset();
 902   switch(type) {
 903     case T_BOOLEAN: // fall through
 904     case T_BYTE  :  __ ldsb(base, disp, to_reg->as_register()); break;
 905     case T_CHAR  :  __ lduh(base, disp, to_reg->as_register()); break;
 906     case T_SHORT :  __ ldsh(base, disp, to_reg->as_register()); break;
 907     case T_INT   :  __ ld(base, disp, to_reg->as_register()); break;
 908     case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
 909     case T_ARRAY : // fall through
 910     case T_OBJECT:
 911       {
 912           if (UseCompressedOops && !wide) {
 913             __ lduw(base, disp, to_reg->as_register());
 914             __ decode_heap_oop(to_reg->as_register());
 915           } else {
 916             __ ld_ptr(base, disp, to_reg->as_register());
 917           }
 918           break;
 919       }
 920     case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
 921     case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
 922     case T_LONG  :
 923       __ ldx(base, disp, to_reg->as_register_lo());
 924       break;
 925     default      : ShouldNotReachHere();
 926   }
 927   if (is_reference_type(type)) {
 928     __ verify_oop(to_reg->as_register());
 929   }
 930   return load_offset;
 931 }
 932 
 933 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
 934   LIR_Const* c = src->as_constant_ptr();
 935   switch (c->type()) {
 936     case T_INT:
 937     case T_FLOAT: {
 938       Register src_reg = O7;
 939       int value = c->as_jint_bits();
 940       if (value == 0) {
 941         src_reg = G0;
 942       } else {
 943         __ set(value, O7);
 944       }
 945       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
 946       __ stw(src_reg, addr.base(), addr.disp());
 947       break;


1342       assert(to_reg->is_double_fpu(), "should match");
1343       __ fmov(FloatRegisterImpl::D, from_reg->as_double_reg(), to_reg->as_double_reg());
1344     } else {
1345       // float to float moves
1346       assert(to_reg->is_single_fpu(), "should match");
1347       __ fmov(FloatRegisterImpl::S, from_reg->as_float_reg(), to_reg->as_float_reg());
1348     }
1349   } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
1350     if (from_reg->is_double_cpu()) {
1351       __ mov(from_reg->as_pointer_register(), to_reg->as_pointer_register());
1352     } else if (to_reg->is_double_cpu()) {
1353       // int to int moves
1354       __ mov(from_reg->as_register(), to_reg->as_register_lo());
1355     } else {
1356       // int to int moves
1357       __ mov(from_reg->as_register(), to_reg->as_register());
1358     }
1359   } else {
1360     ShouldNotReachHere();
1361   }
1362   if (is_reference_type(to_reg->type())) {
1363     __ verify_oop(to_reg->as_register());
1364   }
1365 }
1366 
1367 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
1368                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
1369                             bool wide, bool unaligned) {
1370   assert(type != T_METADATA, "store of metadata ptr not supported");
1371   LIR_Address* addr = dest->as_address_ptr();
1372 
1373   Register src = addr->base()->as_pointer_register();
1374   Register disp_reg = noreg;
1375   int disp_value = addr->disp();
1376   bool needs_patching = (patch_code != lir_patch_none);
1377 
1378   if (addr->base()->is_oop_register()) {
1379     __ verify_oop(src);
1380   }
1381 
1382   PatchingStub* patch = NULL;


2278                      op->tmp2()->as_register(),
2279                      op->tmp3()->as_register(),
2280                      op->header_size(),
2281                      op->object_size(),
2282                      op->klass()->as_register(),
2283                      *op->stub()->entry());
2284   __ bind(*op->stub()->continuation());
2285   __ verify_oop(op->obj()->as_register());
2286 }
2287 
2288 
2289 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
2290   assert(op->tmp1()->as_register()  == G1 &&
2291          op->tmp2()->as_register()  == G3 &&
2292          op->tmp3()->as_register()  == G4 &&
2293          op->tmp4()->as_register()  == O1 &&
2294          op->klass()->as_register() == G5, "must be");
2295 
2296   __ signx(op->len()->as_register());
2297   if (UseSlowPath ||
2298       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
2299       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
2300     __ br(Assembler::always, false, Assembler::pt, *op->stub()->entry());
2301     __ delayed()->nop();
2302   } else {
2303     __ allocate_array(op->obj()->as_register(),
2304                       op->len()->as_register(),
2305                       op->tmp1()->as_register(),
2306                       op->tmp2()->as_register(),
2307                       op->tmp3()->as_register(),
2308                       arrayOopDesc::header_size(op->type()),
2309                       type2aelembytes(op->type()),
2310                       op->klass()->as_register(),
2311                       *op->stub()->entry());
2312   }
2313   __ bind(*op->stub()->continuation());
2314 }
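
The branch at the top of emit_alloc_array falls back to the runtime stub whenever inline allocation is disabled for the array kind being created; restated as a standalone predicate (a sketch only, using the same flags as above):

// Sketch: when the slow (stub) allocation path is taken.
static bool take_slow_array_path(BasicType elem_type) {
  bool object_array = is_reference_type(elem_type);    // T_OBJECT / T_ARRAY elements
  return UseSlowPath ||
         ( object_array && !UseFastNewObjectArray) ||
         (!object_array && !UseFastNewTypeArray);
}
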
2315 
2316 
2317 void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
2318                                         ciMethodData *md, ciProfileData *data,
2319                                         Register recv, Register tmp1, Label* update_done) {

