
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp


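This page shows src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp before and
after a cleanup that replaces the explicit "type == T_OBJECT || type == T_ARRAY"
tests with the is_reference_type() predicate. The replacement is a pure
refactoring of the type checks, so the generated code is functionally
unchanged. For reference, a minimal sketch of the predicate being switched to
(assumed to match the helper in HotSpot's globalDefinitions.hpp; the enum here
is a cut-down stand-in, not the real BasicType declaration):

    // Cut-down illustration only.
    enum BasicType { T_BOOLEAN, T_CHAR, T_FLOAT, T_DOUBLE,
                     T_BYTE, T_SHORT, T_INT, T_LONG,
                     T_OBJECT, T_ARRAY, T_METADATA, T_ADDRESS };

    inline bool is_reference_type(BasicType t) {
      // Exactly the expression the patch replaces throughout this file.
      return t == T_OBJECT || t == T_ARRAY;
    }

Original version (before the change):
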
 711 }
 712 
 713 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 714   assert(src->is_register(), "should not call otherwise");
 715   assert(dest->is_register(), "should not call otherwise");
 716 
 717   // move between cpu-registers
 718   if (dest->is_single_cpu()) {
 719     if (src->type() == T_LONG) {
 720       // Can do LONG -> OBJECT
 721       move_regs(src->as_register_lo(), dest->as_register());
 722       return;
 723     }
 724     assert(src->is_single_cpu(), "must match");
 725     if (src->type() == T_OBJECT) {
 726       __ verify_oop(src->as_register());
 727     }
 728     move_regs(src->as_register(), dest->as_register());
 729 
 730   } else if (dest->is_double_cpu()) {
 731     if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
 732       // Surprising to me, but we can see a move of a long to T_OBJECT
 733       __ verify_oop(src->as_register());
 734       move_regs(src->as_register(), dest->as_register_lo());
 735       return;
 736     }
 737     assert(src->is_double_cpu(), "must match");
 738     Register f_lo = src->as_register_lo();
 739     Register f_hi = src->as_register_hi();
 740     Register t_lo = dest->as_register_lo();
 741     Register t_hi = dest->as_register_hi();
 742     assert(f_hi == f_lo, "must be same");
 743     assert(t_hi == t_lo, "must be same");
 744     move_regs(f_lo, t_lo);
 745 
 746   } else if (dest->is_single_fpu()) {
 747     __ fmovs(dest->as_float_reg(), src->as_float_reg());
 748 
 749   } else if (dest->is_double_fpu()) {
 750     __ fmovd(dest->as_double_reg(), src->as_double_reg());
 751 
 752   } else {
 753     ShouldNotReachHere();
 754   }
 755 }
 756 
 757 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 758   if (src->is_single_cpu()) {
 759     if (type == T_ARRAY || type == T_OBJECT) {
 760       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 761       __ verify_oop(src->as_register());
 762     } else if (type == T_METADATA || type == T_DOUBLE) {
 763       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 764     } else {
 765       __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 766     }
 767 
 768   } else if (src->is_double_cpu()) {
 769     Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 770     __ str(src->as_register_lo(), dest_addr_LO);
 771 
 772   } else if (src->is_single_fpu()) {
 773     Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 774     __ strs(src->as_float_reg(), dest_addr);
 775 
 776   } else if (src->is_double_fpu()) {
 777     Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 778     __ strd(src->as_double_reg(), dest_addr);
 779 
 780   } else {
 781     ShouldNotReachHere();
 782   }
 783 
 784 }
 785 
 786 
 787 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 788   LIR_Address* to_addr = dest->as_address_ptr();
 789   PatchingStub* patch = NULL;
 790   Register compressed_src = rscratch1;
 791 
 792   if (patch_code != lir_patch_none) {
 793     deoptimize_trap(info);
 794     return;
 795   }
 796 
 797   if (type == T_ARRAY || type == T_OBJECT) {
 798     __ verify_oop(src->as_register());
 799 
 800     if (UseCompressedOops && !wide) {
 801       __ encode_heap_oop(compressed_src, src->as_register());
 802     } else {
 803       compressed_src = src->as_register();
 804     }
 805   }
 806 
 807   int null_check_here = code_offset();
 808   switch (type) {
 809     case T_FLOAT: {
 810       __ strs(src->as_float_reg(), as_Address(to_addr));
 811       break;
 812     }
 813 
 814     case T_DOUBLE: {
 815       __ strd(src->as_double_reg(), as_Address(to_addr));
 816       break;
 817     }


 852 
 853     case T_CHAR:    // fall through
 854     case T_SHORT:
 855       __ strh(src->as_register(), as_Address(to_addr));
 856       break;
 857 
 858     default:
 859       ShouldNotReachHere();
 860   }
 861   if (info != NULL) {
 862     add_debug_info_for_null_check(null_check_here, info);
 863   }
 864 }
 865 
 866 
 867 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 868   assert(src->is_stack(), "should not call otherwise");
 869   assert(dest->is_register(), "should not call otherwise");
 870 
 871   if (dest->is_single_cpu()) {
 872     if (type == T_ARRAY || type == T_OBJECT) {
 873       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 874       __ verify_oop(dest->as_register());
 875     } else if (type == T_METADATA) {
 876       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 877     } else {
 878       __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 879     }
 880 
 881   } else if (dest->is_double_cpu()) {
 882     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
 883     __ ldr(dest->as_register_lo(), src_addr_LO);
 884 
 885   } else if (dest->is_single_fpu()) {
 886     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
 887     __ ldrs(dest->as_float_reg(), src_addr);
 888 
 889   } else if (dest->is_double_fpu()) {
 890     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
 891     __ ldrd(dest->as_double_reg(), src_addr);
 892 


1002 
1003     case T_BYTE:
1004       __ ldrsb(dest->as_register(), as_Address(from_addr));
1005       break;
1006     case T_BOOLEAN: {
1007       __ ldrb(dest->as_register(), as_Address(from_addr));
1008       break;
1009     }
1010 
1011     case T_CHAR:
1012       __ ldrh(dest->as_register(), as_Address(from_addr));
1013       break;
1014     case T_SHORT:
1015       __ ldrsh(dest->as_register(), as_Address(from_addr));
1016       break;
1017 
1018     default:
1019       ShouldNotReachHere();
1020   }
1021 
1022   if (type == T_ARRAY || type == T_OBJECT) {
1023     if (UseCompressedOops && !wide) {
1024       __ decode_heap_oop(dest->as_register());
1025     }
1026 
1027     if (!UseZGC) {
1028       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1029       __ verify_oop(dest->as_register());
1030     }
1031   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1032     if (UseCompressedClassPointers) {
1033       __ decode_klass_not_null(dest->as_register());
1034     }
1035   }
1036 }
1037 
1038 
1039 int LIR_Assembler::array_element_size(BasicType type) const {
1040   int elem_size = type2aelembytes(type);
1041   return exact_log2(elem_size);
1042 }


1210                                InstanceKlass::init_state_offset()));
1211     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1212     add_debug_info_for_null_check_here(op->stub()->info());
1213     __ br(Assembler::NE, *op->stub()->entry());
1214   }
1215   __ allocate_object(op->obj()->as_register(),
1216                      op->tmp1()->as_register(),
1217                      op->tmp2()->as_register(),
1218                      op->header_size(),
1219                      op->object_size(),
1220                      op->klass()->as_register(),
1221                      *op->stub()->entry());
1222   __ bind(*op->stub()->continuation());
1223 }
1224 
1225 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1226   Register len =  op->len()->as_register();
1227   __ uxtw(len, len);
1228 
1229   if (UseSlowPath ||
1230       (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
1231       (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
1232     __ b(*op->stub()->entry());
1233   } else {
1234     Register tmp1 = op->tmp1()->as_register();
1235     Register tmp2 = op->tmp2()->as_register();
1236     Register tmp3 = op->tmp3()->as_register();
1237     if (len == tmp1) {
1238       tmp1 = tmp3;
1239     } else if (len == tmp2) {
1240       tmp2 = tmp3;
1241     } else if (len == tmp3) {
1242       // everything is ok
1243     } else {
1244       __ mov(tmp3, len);
1245     }
1246     __ allocate_array(op->obj()->as_register(),
1247                       len,
1248                       tmp1,
1249                       tmp2,
1250                       arrayOopDesc::header_size(op->type()),
1251                       array_element_size(op->type()),


1931       }
1932     }
1933   } else {
1934     Register rreg = right->as_register();
1935     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1936   }
1937 }
1938 
1939 
1940 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1941   if (opr1->is_constant() && opr2->is_single_cpu()) {
1942     // tableswitch
1943     Register reg = as_reg(opr2);
1944     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1945     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1946   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1947     Register reg1 = as_reg(opr1);
1948     if (opr2->is_single_cpu()) {
1949       // cpu register - cpu register
1950       Register reg2 = opr2->as_register();
1951       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
1952         __ cmpoop(reg1, reg2);
1953       } else {
1954         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
1955         __ cmpw(reg1, reg2);
1956       }
1957       return;
1958     }
1959     if (opr2->is_double_cpu()) {
1960       // cpu register - cpu register
1961       Register reg2 = opr2->as_register_lo();
1962       __ cmp(reg1, reg2);
1963       return;
1964     }
1965 
1966     if (opr2->is_constant()) {
1967       bool is_32bit = false; // width of register operand
1968       jlong imm;
1969 
1970       switch(opr2->type()) {
1971       case T_INT:
1972         imm = opr2->as_constant_ptr()->as_jint();
1973         is_32bit = true;
1974         break;


2226 
2227 
2228 // This code replaces a call to arraycopy; no exception may
2229 // be thrown in this code; exceptions must be thrown in the System.arraycopy
2230 // activation frame. We could save some checks if this were not the case.
2231 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2232   ciArrayKlass* default_type = op->expected_type();
2233   Register src = op->src()->as_register();
2234   Register dst = op->dst()->as_register();
2235   Register src_pos = op->src_pos()->as_register();
2236   Register dst_pos = op->dst_pos()->as_register();
2237   Register length  = op->length()->as_register();
2238   Register tmp = op->tmp()->as_register();
2239 
2240   __ resolve(ACCESS_READ, src);
2241   __ resolve(ACCESS_WRITE, dst);
2242 
2243   CodeStub* stub = op->stub();
2244   int flags = op->flags();
2245   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2246   if (basic_type == T_ARRAY) basic_type = T_OBJECT;
2247 
2248   // if we don't know anything, just go through the generic arraycopy
2249   if (default_type == NULL // || basic_type == T_OBJECT
2250       ) {
2251     Label done;
2252     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2253 
2254     // Save the arguments in case the generic arraycopy fails and we
2255     // have to fall back to the JNI stub
2256     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2257     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2258     __ str(src,              Address(sp, 4*BytesPerWord));
2259 
2260     address copyfunc_addr = StubRoutines::generic_arraycopy();
2261     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2262 
2263     // The arguments are in the Java calling convention, so we shift them
2264     // to the C convention
2265     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2266     __ mov(c_rarg0, j_rarg0);


3114         // Insert the tableswitch instruction
3115         inst->insert_before(start_insn,
3116                             new LIR_Op2(lir_cmp, lir_cond_always,
3117                                         LIR_OprFact::intConst(tableswitch_count),
3118                                         reg_opr));
3119         inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
3120         tableswitch_count++;
3121       }
3122       reg = noreg;
3123       last_key = -2147483648;
3124     }
3125   next_state:
3126     ;
3127   }
3128 #endif
3129 }
3130 
3131 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3132   Address addr = as_Address(src->as_address_ptr());
3133   BasicType type = src->type();
3134   bool is_oop = type == T_OBJECT || type == T_ARRAY;
3135 
3136   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3137   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3138 
3139   switch(type) {
3140   case T_INT:
3141     xchg = &MacroAssembler::atomic_xchgalw;
3142     add = &MacroAssembler::atomic_addalw;
3143     break;
3144   case T_LONG:
3145     xchg = &MacroAssembler::atomic_xchgal;
3146     add = &MacroAssembler::atomic_addal;
3147     break;
3148   case T_OBJECT:
3149   case T_ARRAY:
3150     if (UseCompressedOops) {
3151       xchg = &MacroAssembler::atomic_xchgalw;
3152       add = &MacroAssembler::atomic_addalw;
3153     } else {
3154       xchg = &MacroAssembler::atomic_xchgal;


Patched version (after the change):

 711 }
 712 
 713 void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
 714   assert(src->is_register(), "should not call otherwise");
 715   assert(dest->is_register(), "should not call otherwise");
 716 
 717   // move between cpu-registers
 718   if (dest->is_single_cpu()) {
 719     if (src->type() == T_LONG) {
 720       // Can do LONG -> OBJECT
 721       move_regs(src->as_register_lo(), dest->as_register());
 722       return;
 723     }
 724     assert(src->is_single_cpu(), "must match");
 725     if (src->type() == T_OBJECT) {
 726       __ verify_oop(src->as_register());
 727     }
 728     move_regs(src->as_register(), dest->as_register());
 729 
 730   } else if (dest->is_double_cpu()) {
 731     if (is_reference_type(src->type())) {
 732       // Surprising to me, but we can see a move of a long to T_OBJECT
 733       __ verify_oop(src->as_register());
 734       move_regs(src->as_register(), dest->as_register_lo());
 735       return;
 736     }
 737     assert(src->is_double_cpu(), "must match");
 738     Register f_lo = src->as_register_lo();
 739     Register f_hi = src->as_register_hi();
 740     Register t_lo = dest->as_register_lo();
 741     Register t_hi = dest->as_register_hi();
 742     assert(f_hi == f_lo, "must be same");
 743     assert(t_hi == t_lo, "must be same");
 744     move_regs(f_lo, t_lo);
 745 
 746   } else if (dest->is_single_fpu()) {
 747     __ fmovs(dest->as_float_reg(), src->as_float_reg());
 748 
 749   } else if (dest->is_double_fpu()) {
 750     __ fmovd(dest->as_double_reg(), src->as_double_reg());
 751 
 752   } else {
 753     ShouldNotReachHere();
 754   }
 755 }
 756 
 757 void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
 758   if (src->is_single_cpu()) {
 759     if (is_reference_type(type)) {
 760       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 761       __ verify_oop(src->as_register());
 762     } else if (type == T_METADATA || type == T_DOUBLE) {
 763       __ str(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 764     } else {
 765       __ strw(src->as_register(), frame_map()->address_for_slot(dest->single_stack_ix()));
 766     }
 767 
 768   } else if (src->is_double_cpu()) {
 769     Address dest_addr_LO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
 770     __ str(src->as_register_lo(), dest_addr_LO);
 771 
 772   } else if (src->is_single_fpu()) {
 773     Address dest_addr = frame_map()->address_for_slot(dest->single_stack_ix());
 774     __ strs(src->as_float_reg(), dest_addr);
 775 
 776   } else if (src->is_double_fpu()) {
 777     Address dest_addr = frame_map()->address_for_slot(dest->double_stack_ix());
 778     __ strd(src->as_double_reg(), dest_addr);
 779 
 780   } else {
 781     ShouldNotReachHere();
 782   }
 783 
 784 }
 785 
 786 
 787 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
 788   LIR_Address* to_addr = dest->as_address_ptr();
 789   PatchingStub* patch = NULL;
 790   Register compressed_src = rscratch1;
 791 
 792   if (patch_code != lir_patch_none) {
 793     deoptimize_trap(info);
 794     return;
 795   }
 796 
 797   if (is_reference_type(type)) {
 798     __ verify_oop(src->as_register());
 799 
 800     if (UseCompressedOops && !wide) {
 801       __ encode_heap_oop(compressed_src, src->as_register());
 802     } else {
 803       compressed_src = src->as_register();
 804     }
 805   }
 806 
 807   int null_check_here = code_offset();
 808   switch (type) {
 809     case T_FLOAT: {
 810       __ strs(src->as_float_reg(), as_Address(to_addr));
 811       break;
 812     }
 813 
 814     case T_DOUBLE: {
 815       __ strd(src->as_double_reg(), as_Address(to_addr));
 816       break;
 817     }


 852 
 853     case T_CHAR:    // fall through
 854     case T_SHORT:
 855       __ strh(src->as_register(), as_Address(to_addr));
 856       break;
 857 
 858     default:
 859       ShouldNotReachHere();
 860   }
 861   if (info != NULL) {
 862     add_debug_info_for_null_check(null_check_here, info);
 863   }
 864 }
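
When UseCompressedOops is set and the access is not wide, reg2mem() above
narrows the oop with encode_heap_oop() before the store, and the load path
further down (lines 1022 to 1030) widens it again with decode_heap_oop().
Conceptually, the narrow form is the oop's offset from the heap base shifted
right by the object alignment. The sketch below is a rough standalone model
under that assumption, not HotSpot's actual encode_heap_oop()/decode_heap_oop(),
which also handle the zero-based and unscaled modes chosen at startup.

    #include <cstdint>

    // Rough model of a base+shift narrow-oop scheme (illustration only).
    struct NarrowOopScheme {
      uint64_t base;   // heap base address
      int      shift;  // log2 of object alignment, e.g. 3 for 8-byte alignment

      uint32_t encode(uint64_t oop) const {
        if (oop == 0) return 0;                        // NULL encodes to 0
        return (uint32_t)((oop - base) >> shift);
      }
      uint64_t decode(uint32_t narrow) const {
        if (narrow == 0) return 0;
        return base + ((uint64_t)narrow << shift);
      }
    };

The decode_klass_not_null() call at line 1033 plays the analogous role for
compressed class pointers, which use their own base and shift.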
 865 
 866 
 867 void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
 868   assert(src->is_stack(), "should not call otherwise");
 869   assert(dest->is_register(), "should not call otherwise");
 870 
 871   if (dest->is_single_cpu()) {
 872     if (is_reference_type(type)) {
 873       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 874       __ verify_oop(dest->as_register());
 875     } else if (type == T_METADATA) {
 876       __ ldr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 877     } else {
 878       __ ldrw(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
 879     }
 880 
 881   } else if (dest->is_double_cpu()) {
 882     Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
 883     __ ldr(dest->as_register_lo(), src_addr_LO);
 884 
 885   } else if (dest->is_single_fpu()) {
 886     Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
 887     __ ldrs(dest->as_float_reg(), src_addr);
 888 
 889   } else if (dest->is_double_fpu()) {
 890     Address src_addr = frame_map()->address_for_slot(src->double_stack_ix());
 891     __ ldrd(dest->as_double_reg(), src_addr);
 892 


1002 
1003     case T_BYTE:
1004       __ ldrsb(dest->as_register(), as_Address(from_addr));
1005       break;
1006     case T_BOOLEAN: {
1007       __ ldrb(dest->as_register(), as_Address(from_addr));
1008       break;
1009     }
1010 
1011     case T_CHAR:
1012       __ ldrh(dest->as_register(), as_Address(from_addr));
1013       break;
1014     case T_SHORT:
1015       __ ldrsh(dest->as_register(), as_Address(from_addr));
1016       break;
1017 
1018     default:
1019       ShouldNotReachHere();
1020   }
1021 
1022   if (is_reference_type(type)) {
1023     if (UseCompressedOops && !wide) {
1024       __ decode_heap_oop(dest->as_register());
1025     }
1026 
1027     if (!UseZGC) {
1028       // Load barrier has not yet been applied, so ZGC can't verify the oop here
1029       __ verify_oop(dest->as_register());
1030     }
1031   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
1032     if (UseCompressedClassPointers) {
1033       __ decode_klass_not_null(dest->as_register());
1034     }
1035   }
1036 }
1037 
1038 
1039 int LIR_Assembler::array_element_size(BasicType type) const {
1040   int elem_size = type2aelembytes(type);
1041   return exact_log2(elem_size);
1042 }
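
array_element_size() converts an element size into the shift amount used to
scale an array index into a byte offset (for example a left shift by 2 for
T_INT elements). A minimal illustration of that mapping, assuming the usual
HotSpot element sizes rather than calling the real type2aelembytes() and
exact_log2():

    // Illustration only: power-of-two element sizes and the shift they yield.
    constexpr int log2_exact(int n) {
      return (n == 1) ? 0 : 1 + log2_exact(n / 2);
    }
    static_assert(log2_exact(1) == 0, "T_BOOLEAN / T_BYTE: 1-byte elements");
    static_assert(log2_exact(2) == 1, "T_CHAR / T_SHORT: 2-byte elements");
    static_assert(log2_exact(4) == 2, "T_INT / T_FLOAT: 4-byte elements");
    static_assert(log2_exact(8) == 3, "T_LONG / T_DOUBLE: 8-byte elements");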


1210                                InstanceKlass::init_state_offset()));
1211     __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1212     add_debug_info_for_null_check_here(op->stub()->info());
1213     __ br(Assembler::NE, *op->stub()->entry());
1214   }
1215   __ allocate_object(op->obj()->as_register(),
1216                      op->tmp1()->as_register(),
1217                      op->tmp2()->as_register(),
1218                      op->header_size(),
1219                      op->object_size(),
1220                      op->klass()->as_register(),
1221                      *op->stub()->entry());
1222   __ bind(*op->stub()->continuation());
1223 }
1224 
1225 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1226   Register len =  op->len()->as_register();
1227   __ uxtw(len, len);
1228 
1229   if (UseSlowPath ||
1230       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1231       (!UseFastNewTypeArray   && !is_reference_type(op->type()))) {
1232     __ b(*op->stub()->entry());
1233   } else {
1234     Register tmp1 = op->tmp1()->as_register();
1235     Register tmp2 = op->tmp2()->as_register();
1236     Register tmp3 = op->tmp3()->as_register();
1237     if (len == tmp1) {
1238       tmp1 = tmp3;
1239     } else if (len == tmp2) {
1240       tmp2 = tmp3;
1241     } else if (len == tmp3) {
1242       // everything is ok
1243     } else {
1244       __ mov(tmp3, len);
1245     }
1246     __ allocate_array(op->obj()->as_register(),
1247                       len,
1248                       tmp1,
1249                       tmp2,
1250                       arrayOopDesc::header_size(op->type()),
1251                       array_element_size(op->type()),


1931       }
1932     }
1933   } else {
1934     Register rreg = right->as_register();
1935     __ corrected_idivl(dreg, lreg, rreg, is_irem, rscratch1);
1936   }
1937 }
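
The corrected_idivl() helper above has to produce Java's 32-bit division
semantics: Integer.MIN_VALUE / -1 does not trap, it wraps back to
Integer.MIN_VALUE, and the corresponding remainder is 0. The sketch below is a
reference for the required results only, not the emitted AArch64 sequence; it
leaves out division by zero, which C1 is assumed to guard with a separate
check and ArithmeticException path.

    #include <cstdint>
    #include <limits>

    // Expected Java idiv/irem results (illustration only).
    int32_t java_idiv(int32_t a, int32_t b) {
      if (a == std::numeric_limits<int32_t>::min() && b == -1) return a;  // wraps
      return a / b;
    }

    int32_t java_irem(int32_t a, int32_t b) {
      if (a == std::numeric_limits<int32_t>::min() && b == -1) return 0;
      return a % b;
    }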
1938 
1939 
1940 void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
1941   if (opr1->is_constant() && opr2->is_single_cpu()) {
1942     // tableswitch
1943     Register reg = as_reg(opr2);
1944     struct tableswitch &table = switches[opr1->as_constant_ptr()->as_jint()];
1945     __ tableswitch(reg, table._first_key, table._last_key, table._branches, table._after);
1946   } else if (opr1->is_single_cpu() || opr1->is_double_cpu()) {
1947     Register reg1 = as_reg(opr1);
1948     if (opr2->is_single_cpu()) {
1949       // cpu register - cpu register
1950       Register reg2 = opr2->as_register();
1951       if (is_reference_type(opr1->type())) {
1952         __ cmpoop(reg1, reg2);
1953       } else {
1954         assert(!is_reference_type(opr2->type()), "cmp int, oop?");
1955         __ cmpw(reg1, reg2);
1956       }
1957       return;
1958     }
1959     if (opr2->is_double_cpu()) {
1960       // cpu register - cpu register
1961       Register reg2 = opr2->as_register_lo();
1962       __ cmp(reg1, reg2);
1963       return;
1964     }
1965 
1966     if (opr2->is_constant()) {
1967       bool is_32bit = false; // width of register operand
1968       jlong imm;
1969 
1970       switch(opr2->type()) {
1971       case T_INT:
1972         imm = opr2->as_constant_ptr()->as_jint();
1973         is_32bit = true;
1974         break;


2226 
2227 
2228 // This code replaces a call to arraycopy; no exception may
2229 // be thrown in this code; exceptions must be thrown in the System.arraycopy
2230 // activation frame. We could save some checks if this were not the case.
2231 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2232   ciArrayKlass* default_type = op->expected_type();
2233   Register src = op->src()->as_register();
2234   Register dst = op->dst()->as_register();
2235   Register src_pos = op->src_pos()->as_register();
2236   Register dst_pos = op->dst_pos()->as_register();
2237   Register length  = op->length()->as_register();
2238   Register tmp = op->tmp()->as_register();
2239 
2240   __ resolve(ACCESS_READ, src);
2241   __ resolve(ACCESS_WRITE, dst);
2242 
2243   CodeStub* stub = op->stub();
2244   int flags = op->flags();
2245   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
2246   if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2247 
2248   // if we don't know anything, just go through the generic arraycopy
2249   if (default_type == NULL // || basic_type == T_OBJECT
2250       ) {
2251     Label done;
2252     assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2253 
2254     // Save the arguments in case the generic arraycopy fails and we
2255     // have to fall back to the JNI stub
2256     __ stp(dst,     dst_pos, Address(sp, 0*BytesPerWord));
2257     __ stp(length,  src_pos, Address(sp, 2*BytesPerWord));
2258     __ str(src,              Address(sp, 4*BytesPerWord));
2259 
2260     address copyfunc_addr = StubRoutines::generic_arraycopy();
2261     assert(copyfunc_addr != NULL, "generic arraycopy stub required");
2262 
2263     // The arguments are in the Java calling convention, so we shift them
2264     // to the C convention
2265     assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2266     __ mov(c_rarg0, j_rarg0);

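
The shift to the C convention above relies on the assumption that the AArch64
Java argument registers are the C argument registers offset by one; the assert
that src == r1 and src_pos == r2 is consistent with j_rarg0 = r1 and
j_rarg1 = r2, while c_rarg0 = r0. Under that assumption, copying the arguments
in ascending order, starting with the mov of j_rarg0 into c_rarg0 shown above,
never overwrites a register that a later copy still needs. A small sketch of
that ordering argument, with the register numbers treated as assumptions
rather than taken from register_aarch64.hpp:

    #include <cstdio>

    // Assumed mapping for illustration: c_rarg0..c_rarg4 = r0..r4 and
    // j_rarg0..j_rarg4 = r1..r5. Each copy writes r[i] and reads r[i+1],
    // so an ascending sequence of copies is clobber-free.
    int main() {
      const int c_rarg[5] = {0, 1, 2, 3, 4};
      const int j_rarg[5] = {1, 2, 3, 4, 5};
      for (int i = 0; i < 5; i++) {
        std::printf("mov r%d, r%d\n", c_rarg[i], j_rarg[i]);
      }
      return 0;
    }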

3114         // Insert the tableswitch instruction
3115         inst->insert_before(start_insn,
3116                             new LIR_Op2(lir_cmp, lir_cond_always,
3117                                         LIR_OprFact::intConst(tableswitch_count),
3118                                         reg_opr));
3119         inst->insert_before(start_insn + 1, new LIR_OpLabel(&sw->_branches));
3120         tableswitch_count++;
3121       }
3122       reg = noreg;
3123       last_key = -2147483648;
3124     }
3125   next_state:
3126     ;
3127   }
3128 #endif
3129 }
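
The fragment above plants a marker in the LIR stream: a lir_cmp whose left
operand is the small integer tableswitch_count and whose right operand is the
key register. comp_op() (lines 1940 to 1945) recognizes that shape, uses the
constant to index the switches table, and emits the actual dispatch with
MacroAssembler::tableswitch. A rough standalone model of what such a dispatch
computes (illustration only, not the emitted code):

    #include <cstdio>

    // In range, the key selects an entry in the branch table; out of range
    // falls through to the "after" (default) label, modelled here as -1.
    int table_switch_target(int key, int first_key, int last_key) {
      if (key < first_key || key > last_key) return -1;
      return key - first_key;   // index into the branch table
    }

    int main() {
      std::printf("%d %d %d\n",
                  table_switch_target(1, 1, 5),    // 0: first branch
                  table_switch_target(3, 1, 5),    // 2
                  table_switch_target(9, 1, 5));   // -1: default
      return 0;
    }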
3130 
3131 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
3132   Address addr = as_Address(src->as_address_ptr());
3133   BasicType type = src->type();
3134   bool is_oop = is_reference_type(type);
3135 
3136   void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
3137   void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
3138 
3139   switch(type) {
3140   case T_INT:
3141     xchg = &MacroAssembler::atomic_xchgalw;
3142     add = &MacroAssembler::atomic_addalw;
3143     break;
3144   case T_LONG:
3145     xchg = &MacroAssembler::atomic_xchgal;
3146     add = &MacroAssembler::atomic_addal;
3147     break;
3148   case T_OBJECT:
3149   case T_ARRAY:
3150     if (UseCompressedOops) {
3151       xchg = &MacroAssembler::atomic_xchgalw;
3152       add = &MacroAssembler::atomic_addalw;
3153     } else {
3154       xchg = &MacroAssembler::atomic_xchgal;

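atomic_op() selects the MacroAssembler member to call through a pointer to
member function: the word-sized xchg/add variants for T_INT and for compressed
oops, the doubleword-sized variants for T_LONG and uncompressed oops. A
standalone illustration of that C++ dispatch idiom (the class and members here
are made up; only the select-then-invoke pattern matches the code above):

    #include <cstdio>

    struct Masm {
      void xchg_word(int v)  { std::printf("32-bit exchange of %d\n", v); }
      void xchg_dword(int v) { std::printf("64-bit exchange of %d\n", v); }
    };

    int main() {
      bool use_narrow = true;   // stands in for UseCompressedOops
      void (Masm::*xchg)(int) = use_narrow ? &Masm::xchg_word
                                           : &Masm::xchg_dword;
      Masm masm;
      (masm.*xchg)(42);         // through a pointer it would be (p->*xchg)(42)
      return 0;
    }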
