src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp
*** old/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Nov 25 07:05:51 2010
--- new/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Nov 25 07:05:50 2010

*** 98,107 ****
--- 98,112 ----
    if (src->is_double_cpu() || dst->is_double_cpu() || op1->patch_code() != lir_patch_none ||
        ((src->is_double_fpu() || dst->is_double_fpu()) && op1->move_kind() != lir_move_normal)) {
      return false;
    }
  
+   if (UseCompressedOops) {
+     if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
+     if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
+   }
+ 
    if (dst->is_register()) {
      if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
        return !PatchALot;
      } else if (src->is_single_stack()) {
        return true;
*** 251,270 ****
--- 256,275 ----
    int value_offset = java_lang_String:: value_offset_in_bytes(); // char array
    int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
    int count_offset = java_lang_String:: count_offset_in_bytes();
  
!   __ ld_ptr(str0, value_offset, tmp0);
!   __ load_heap_oop(str0, value_offset, tmp0);
    __ ld(str0, offset_offset, tmp2);
    __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
    __ ld(str0, count_offset, str0);
    __ sll(tmp2, exact_log2(sizeof(jchar)), tmp2);
  
    // str1 may be null
    add_debug_info_for_null_check_here(info);
  
!   __ ld_ptr(str1, value_offset, tmp1);
!   __ load_heap_oop(str1, value_offset, tmp1);
    __ add(tmp0, tmp2, tmp0);
    __ ld(str1, offset_offset, tmp2);
    __ add(tmp1, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp1);
    __ ld(str1, count_offset, str1);
*** 764,774 ****
--- 769,779 ----
  }
  
  
  void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
    add_debug_info_for_null_check_here(op->info());
!   __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
!   __ load_klass(O0, G3_scratch);
    if (__ is_simm13(op->vtable_offset())) {
      __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
    } else {
      // This will generate 2 instructions
      __ set(op->vtable_offset(), G5_method);
*** 778,919 ****
--- 783,803 ----
    __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3_scratch);
    __ callr(G3_scratch, G0);  // the peephole pass fills the delay slot
  }
  
  
  // load with 32-bit displacement
  int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
    int load_offset = code_offset();
    if (Assembler::is_simm13(disp)) {
      if (info != NULL) add_debug_info_for_null_check_here(info);
      switch(ld_type) {
        case T_BOOLEAN: // fall through
        case T_BYTE : __ ldsb(s, disp, d); break;
        case T_CHAR : __ lduh(s, disp, d); break;
        case T_SHORT : __ ldsh(s, disp, d); break;
        case T_INT : __ ld(s, disp, d); break;
        case T_ADDRESS:// fall through
        case T_ARRAY : // fall through
        case T_OBJECT: __ ld_ptr(s, disp, d); break;
        default : ShouldNotReachHere();
      }
    } else {
      __ set(disp, O7);
      if (info != NULL) add_debug_info_for_null_check_here(info);
      load_offset = code_offset();
      switch(ld_type) {
        case T_BOOLEAN: // fall through
        case T_BYTE : __ ldsb(s, O7, d); break;
        case T_CHAR : __ lduh(s, O7, d); break;
        case T_SHORT : __ ldsh(s, O7, d); break;
        case T_INT : __ ld(s, O7, d); break;
        case T_ADDRESS:// fall through
        case T_ARRAY : // fall through
        case T_OBJECT: __ ld_ptr(s, O7, d); break;
        default : ShouldNotReachHere();
      }
    }
    if (ld_type == T_ARRAY || ld_type == T_OBJECT) __ verify_oop(d);
    return load_offset;
  }
  
  
  // store with 32-bit displacement
  void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
    if (Assembler::is_simm13(offset)) {
      if (info != NULL) add_debug_info_for_null_check_here(info);
      switch (type) {
        case T_BOOLEAN: // fall through
        case T_BYTE : __ stb(value, base, offset); break;
        case T_CHAR : __ sth(value, base, offset); break;
        case T_SHORT : __ sth(value, base, offset); break;
        case T_INT : __ stw(value, base, offset); break;
        case T_ADDRESS:// fall through
        case T_ARRAY : // fall through
        case T_OBJECT: __ st_ptr(value, base, offset); break;
        default : ShouldNotReachHere();
      }
    } else {
      __ set(offset, O7);
      if (info != NULL) add_debug_info_for_null_check_here(info);
      switch (type) {
        case T_BOOLEAN: // fall through
        case T_BYTE : __ stb(value, base, O7); break;
        case T_CHAR : __ sth(value, base, O7); break;
        case T_SHORT : __ sth(value, base, O7); break;
        case T_INT : __ stw(value, base, O7); break;
        case T_ADDRESS:// fall through
        case T_ARRAY : //fall through
        case T_OBJECT: __ st_ptr(value, base, O7); break;
        default : ShouldNotReachHere();
      }
    }
    // Note: Do the store before verification as the code might be patched!
    if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(value);
  }
  
  
  // load float with 32-bit displacement
  void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
    FloatRegisterImpl::Width w;
    switch(ld_type) {
      case T_FLOAT : w = FloatRegisterImpl::S; break;
      case T_DOUBLE: w = FloatRegisterImpl::D; break;
      default : ShouldNotReachHere();
    }
  
    if (Assembler::is_simm13(disp)) {
      if (info != NULL) add_debug_info_for_null_check_here(info);
      if (disp % BytesPerLong != 0 && w == FloatRegisterImpl::D) {
        __ ldf(FloatRegisterImpl::S, s, disp + BytesPerWord, d->successor());
        __ ldf(FloatRegisterImpl::S, s, disp               , d);
      } else {
        __ ldf(w, s, disp, d);
      }
    } else {
      __ set(disp, O7);
      if (info != NULL) add_debug_info_for_null_check_here(info);
      __ ldf(w, s, O7, d);
    }
  }
  
  
  // store float with 32-bit displacement
  void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
    FloatRegisterImpl::Width w;
    switch(type) {
      case T_FLOAT : w = FloatRegisterImpl::S; break;
      case T_DOUBLE: w = FloatRegisterImpl::D; break;
      default : ShouldNotReachHere();
    }
  
    if (Assembler::is_simm13(offset)) {
      if (info != NULL) add_debug_info_for_null_check_here(info);
      if (w == FloatRegisterImpl::D && offset % BytesPerLong != 0) {
        __ stf(FloatRegisterImpl::S, value->successor(), base, offset + BytesPerWord);
        __ stf(FloatRegisterImpl::S, value             , base, offset);
      } else {
        __ stf(w, value, base, offset);
      }
    } else {
      __ set(offset, O7);
      if (info != NULL) add_debug_info_for_null_check_here(info);
      __ stf(w, value, O7, base);
    }
  }
  
  int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) {
+ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
    int store_offset;
    if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
      assert(!unaligned, "can't handle this");
      // for offsets larger than a simm13 we setup the offset in O7
      __ set(offset, O7);
!     store_offset = store(from_reg, base, O7, type, wide);
    } else {
!     if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
!     if (type == T_ARRAY || type == T_OBJECT) {
+       __ verify_oop(from_reg->as_register());
+     }
      store_offset = code_offset();
      switch (type) {
        case T_BOOLEAN: // fall through
        case T_BYTE : __ stb(from_reg->as_register(), base, offset); break;
        case T_CHAR : __ sth(from_reg->as_register(), base, offset); break;
*** 932,944 ****
--- 816,841 ----
          assert(Assembler::is_simm13(offset + 4), "must be");
          __ stw(from_reg->as_register_lo(), base, offset + lo_word_offset_in_bytes);
          __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
  #endif
          break;
-       case T_ADDRESS:// fall through
+         __ st_ptr(from_reg->as_register(), base, offset);
+         break;
        case T_ARRAY : // fall through
-       case T_OBJECT: __ st_ptr(from_reg->as_register(), base, offset); break;
+         {
+           if (UseCompressedOops && !wide) {
+             __ encode_heap_oop(from_reg->as_register(), G3_scratch);
+             store_offset = code_offset();
+             __ stw(G3_scratch, base, offset);
+           } else {
+             __ st_ptr(from_reg->as_register(), base, offset);
+           }
+           break;
+         }
+ 
        case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
        case T_DOUBLE:
          {
            FloatRegister reg = from_reg->as_double_reg();
            // split unaligned stores
*** 956,967 ****
--- 853,866 ----
      }
    return store_offset;
  }
  
  
! int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
!   if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
!   if (type == T_ARRAY || type == T_OBJECT) {
+     __ verify_oop(from_reg->as_register());
+   }
    int store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE : __ stb(from_reg->as_register(), base, disp); break;
      case T_CHAR : __ sth(from_reg->as_register(), base, disp); break;
*** 973,1001 ****
--- 872,912 ----
  #else
        assert(from_reg->as_register_hi()->successor() == from_reg->as_register_lo(), "must match");
        __ std(from_reg->as_register_hi(), base, disp);
  #endif
        break;
-     case T_ADDRESS:// fall through
+       __ st_ptr(from_reg->as_register(), base, disp);
+       break;
      case T_ARRAY : // fall through
-     case T_OBJECT: __ st_ptr(from_reg->as_register(), base, disp); break;
+       {
+         if (UseCompressedOops && !wide) {
+           __ encode_heap_oop(from_reg->as_register(), G3_scratch);
+           store_offset = code_offset();
+           __ stw(G3_scratch, base, disp);
+         } else {
+           __ st_ptr(from_reg->as_register(), base, disp);
+         }
+         break;
+       }
      case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
      case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
      default : ShouldNotReachHere();
    }
    return store_offset;
  }
  
  
! int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
    int load_offset;
    if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
      assert(base != O7, "destroying register");
      assert(!unaligned, "can't handle this");
      // for offsets larger than a simm13 we setup the offset in O7
      __ set(offset, O7);
!     load_offset = load(base, O7, to_reg, type, wide);
    } else {
      load_offset = code_offset();
      switch(type) {
        case T_BOOLEAN: // fall through
        case T_BYTE : __ ldsb(base, offset, to_reg->as_register()); break;
*** 1028,1040 ****
--- 939,960 ----
            __ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
          }
  #endif
        }
        break;
!     case T_ADDRESS:// fall through
!     case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
      case T_ARRAY : // fall through
-     case T_OBJECT: __ ld_ptr(base, offset, to_reg->as_register()); break;
+       {
+         if (UseCompressedOops && !wide) {
+           __ lduw(base, offset, to_reg->as_register());
+           __ decode_heap_oop(to_reg->as_register());
+         } else {
+           __ ld_ptr(base, offset, to_reg->as_register());
+         }
+         break;
+       }
      case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
          FloatRegister reg = to_reg->as_double_reg();
          // split unaligned loads
*** 1046,1072 ****
--- 966,1003 ----
          }
          break;
        }
      default : ShouldNotReachHere();
    }
!   if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
!   if (type == T_ARRAY || type == T_OBJECT) {
+     __ verify_oop(to_reg->as_register());
+   }
    }
    return load_offset;
  }
  
  
! int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
    int load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break;
      case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
      case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
      case T_INT : __ ld(base, disp, to_reg->as_register()); break;
!     case T_ADDRESS:// fall through
!     case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
      case T_ARRAY : // fall through
-     case T_OBJECT: __ ld_ptr(base, disp, to_reg->as_register()); break;
+       {
+         if (UseCompressedOops && !wide) {
+           __ lduw(base, disp, to_reg->as_register());
+           __ decode_heap_oop(to_reg->as_register());
+         } else {
+           __ ld_ptr(base, disp, to_reg->as_register());
+         }
+         break;
+       }
      case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
      case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
      case T_LONG :
  #ifdef _LP64
        __ ldx(base, disp, to_reg->as_register_lo());
*** 1076,1140 ****
--- 1007,1027 ----
        __ ldd(base, disp, to_reg->as_register_hi());
  #endif
        break;
      default : ShouldNotReachHere();
    }
!   if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
!   if (type == T_ARRAY || type == T_OBJECT) {
+     __ verify_oop(to_reg->as_register());
+   }
    return load_offset;
  }
  
  
  // load/store with an Address
  void LIR_Assembler::load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo *info, int offset) {
    load(a.base(), a.disp() + offset, d, ld_type, info);
  }
  
  
  void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
    store(value, dest.base(), dest.disp() + offset, type, info);
  }
  
  
  // loadf/storef with an Address
  void LIR_Assembler::load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) {
    load(a.base(), a.disp() + offset, d, ld_type, info);
  }
  
  
  void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
    store(value, dest.base(), dest.disp() + offset, type, info);
  }
  
  
  // load/store with an Address
  void LIR_Assembler::load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo *info) {
    load(as_Address(a), d, ld_type, info);
  }
  
  
  void LIR_Assembler::store(Register value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
    store(value, as_Address(dest), type, info);
  }
  
  
  // loadf/storef with an Address
  void LIR_Assembler::load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
    load(as_Address(a), d, ld_type, info);
  }
  
  
  void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
    store(value, as_Address(dest), type, info);
  }
  
  
  void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
    LIR_Const* c = src->as_constant_ptr();
    switch (c->type()) {
      case T_INT:
!     case T_FLOAT: {
      case T_ADDRESS: {
        Register src_reg = O7;
        int value = c->as_jint_bits();
        if (value == 0) {
          src_reg = G0;
        } else {
*** 1142,1151 ****
--- 1029,1050 ----
        }
        Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
        __ stw(src_reg, addr.base(), addr.disp());
        break;
      }
+     case T_ADDRESS: {
+       Register src_reg = O7;
+       int value = c->as_jint_bits();
+       if (value == 0) {
+         src_reg = G0;
+       } else {
+         __ set(value, O7);
+       }
+       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
+       __ st_ptr(src_reg, addr.base(), addr.disp());
+       break;
+     }
      case T_OBJECT: {
        Register src_reg = O7;
        jobject2reg(c->as_jobject(), src_reg);
        Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
        __ st_ptr(src_reg, addr.base(), addr.disp());
*** 1176,1193 ****
--- 1075,1090 ----
      Unimplemented();
    }
  }
  
  
! void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
    LIR_Const* c = src->as_constant_ptr();
    LIR_Address* addr = dest->as_address_ptr();
    Register base = addr->base()->as_pointer_register();
+   int offset = -1;
  
    if (info != NULL) {
      add_debug_info_for_null_check_here(info);
    }
    switch (c->type()) {
      case T_INT:
      case T_FLOAT:
      case T_ADDRESS: {
        LIR_Opr tmp = FrameMap::O7_opr;
*** 1197,1234 ****
--- 1094,1131 ----
        } else if (Assembler::is_simm13(value)) {
          __ set(value, O7);
        }
        if (addr->index()->is_valid()) {
          assert(addr->disp() == 0, "must be zero");
!         offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
        } else {
          assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
!         offset = store(tmp, base, addr->disp(), type, wide, false);
        }
        break;
      }
      case T_LONG:
      case T_DOUBLE: {
        assert(!addr->index()->is_valid(), "can't handle reg reg address here");
        assert(Assembler::is_simm13(addr->disp()) && Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
!       Register tmp = O7;
!       LIR_Opr tmp = FrameMap::O7_opr;
        int value_lo = c->as_jint_lo_bits();
        if (value_lo == 0) {
!         tmp = FrameMap::G0_opr;
        } else {
          __ set(value_lo, O7);
        }
!       offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
        int value_hi = c->as_jint_hi_bits();
        if (value_hi == 0) {
!         tmp = FrameMap::G0_opr;
        } else {
          __ set(value_hi, O7);
        }
!       offset = store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
        break;
      }
      case T_OBJECT: {
        jobject obj = c->as_jobject();
        LIR_Opr tmp;
*** 1239,1259 ****
--- 1136,1160 ----
        jobject2reg(c->as_jobject(), O7);
      }
      // handle either reg+reg or reg+disp address
      if (addr->index()->is_valid()) {
        assert(addr->disp() == 0, "must be zero");
!       offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
      } else {
        assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
!       offset = store(tmp, base, addr->disp(), type, wide, false);
      }
      break;
    }
    default:
      Unimplemented();
    }
+   if (info != NULL) {
+     assert(offset != -1, "offset should've been set");
+     add_debug_info_for_null_check(offset, info);
+   }
  }
  
  
  void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
    LIR_Const* c = src->as_constant_ptr();
*** 1334,1344 ****
--- 1235,1245 ----
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
  
          __ set(const_addrlit, O7);
!         load(O7, 0, to_reg->as_register(), T_INT);
!         __ ld(O7, 0, to_reg->as_register());
        }
      }
      break;
  
      case T_DOUBLE:
*** 1427,1437 ****
--- 1328,1338 ----
    return Address(base.base(), base.disp() + lo_word_offset_in_bytes);
  }
  
  
  void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
!                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
  
    LIR_Address* addr = src_opr->as_address_ptr();
    LIR_Opr to_reg = dest;
    Register src = addr->base()->as_pointer_register();
*** 1473,1492 ****
--- 1374,1392 ----
    // entered in increasing order.
    int offset = code_offset();
  
    assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
    if (disp_reg == noreg) {
!     offset = load(src, disp_value, to_reg, type, wide, unaligned);
    } else {
      assert(!unaligned, "can't handle this");
!     offset = load(src, disp_reg, to_reg, type, wide);
    }
  
    if (patch != NULL) {
      patching_epilog(patch, patch_code, src, info);
    }
    if (info != NULL) add_debug_info_for_null_check(offset, info);
  }
  
  
  void LIR_Assembler::prefetchr(LIR_Opr src) {
*** 1516,1526 ****
--- 1416,1426 ----
    } else if (src->is_double_word()) {
      addr = frame_map()->address_for_double_slot(src->double_stack_ix());
    }
  
    bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
!   load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
  }
  
  
  void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
    Address addr;
*** 1528,1538 ****
--- 1428,1438 ----
      addr = frame_map()->address_for_slot(dest->single_stack_ix());
    } else if (dest->is_double_word()) {
      addr = frame_map()->address_for_slot(dest->double_stack_ix());
    }
  
    bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
!   store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
  }
  
  
  void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
    if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
*** 1576,1586 ****
--- 1476,1486 ----
  }
  
  
  void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                              LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
!                             bool wide, bool unaligned) {
    LIR_Address* addr = dest->as_address_ptr();
  
    Register src = addr->base()->as_pointer_register();
    Register disp_reg = noreg;
    int disp_value = addr->disp();
*** 1620,1633 ****
--- 1520,1533 ----
    // entered in increasing order.
    int offset;
  
    assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
    if (disp_reg == noreg) {
!     offset = store(from_reg, src, disp_value, type, wide, unaligned);
    } else {
      assert(!unaligned, "can't handle this");
!     offset = store(from_reg, src, disp_reg, type, wide);
    }
  
    if (patch != NULL) {
      patching_epilog(patch, patch_code, src, info);
    }
*** 2182,2198 ****
--- 2082,2098 ----
    assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
  
    // make sure src and dst are non-null and load array length
    if (flags & LIR_OpArrayCopy::src_null_check) {
      __ tst(src);
!     __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }
  
    if (flags & LIR_OpArrayCopy::dst_null_check) {
      __ tst(dst);
!     __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }
  
    if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
      // test src_pos register
*** 2230,2243 ****
--- 2130,2149 ----
      __ br(Assembler::carrySet, false, Assembler::pn, *stub->entry());
      __ delayed()->nop();
    }
  
    if (flags & LIR_OpArrayCopy::type_check) {
+     if (UseCompressedOops) {
+       // We don't need decode because we just need to compare
+       __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
+       __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
+     } else {
        __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
        __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
+     }
      __ cmp(tmp, tmp2);
!     __ br(Assembler::notEqual, Assembler::heap_oop_cc(), false, Assembler::pt, *stub->entry());
      __ delayed()->nop();
    }
  
  #ifdef ASSERT
    if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
*** 2248,2270 ****
--- 2154,2190 ----
      // dst type is exactly the expected type and the src type is a
      // subtype which we can't check or src is the same array as dst
      // but not necessarily exactly of type default_type.
      Label known_ok, halt;
      jobject2reg(op->expected_type()->constant_encoding(), tmp);
+     if (UseCompressedOops) {
+       // tmp holds the default type. It currently comes uncompressed after the
+       // load of a constant, so encode it.
+       __ encode_heap_oop(tmp);
+       // load the raw value of the dst klass, since we will be comparing
+       // uncompressed values directly.
+       __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
+     } else {
        __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
+     }
      if (basic_type != T_OBJECT) {
        __ cmp(tmp, tmp2);
!       __ br(Assembler::notEqual, Assembler::heap_oop_cc(), false, Assembler::pn, halt);
+       if (UseCompressedOops) {
+         // load the raw value of the src klass.
+         __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
+       } else {
          __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
+       }
        __ cmp(tmp, tmp2);
!       __ br(Assembler::equal, Assembler::heap_oop_cc(), false, Assembler::pn, known_ok);
        __ delayed()->nop();
      } else {
        __ cmp(tmp, tmp2);
!       __ br(Assembler::equal, Assembler::heap_oop_cc(), false, Assembler::pn, known_ok);
        __ delayed()->cmp(src, dst);
!       __ brx(Assembler::equal, false, Assembler::pn, known_ok);
        __ delayed()->nop();
      }
      __ bind(halt);
      __ stop("incorrect type information in arraycopy");
      __ bind(known_ok);
*** 2469,2479 ****
--- 2389,2399 ----
    // Didn't find receiver; find next empty slot and fill it in
    for (i = 0; i < VirtualCallData::row_limit(); i++) {
      Label next_test;
      Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                        mdo_offset_bias);
!     load(recv_addr, tmp1, T_OBJECT);
!     __ ld_ptr(recv_addr, tmp1);
      __ br_notnull(tmp1, false, Assembler::pt, next_test);
      __ delayed()->nop();
      __ st_ptr(recv, recv_addr);
      __ set(DataLayout::counter_increment, tmp1);
      __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
*** 2561,2571 ****
--- 2481,2491 ----
    }
    assert(obj != k_RInfo, "must be different");
  
    // get object class
    // not a safepoint as obj null check happens earlier
!   load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
!   __ load_klass(obj, klass_RInfo);
    if (op->fast_check()) {
      assert_different_registers(klass_RInfo, k_RInfo);
      __ cmp(k_RInfo, klass_RInfo);
      __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
      __ delayed()->nop();
*** 2603,2613 ****
--- 2523,2533 ----
      jobject2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
!     load(Address(obj, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
!     __ load_klass(obj, recv);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
      // Jump over the failure case
      __ ba(false, *success);
      __ delayed()->nop();
      // Cast failure case
*** 2672,2686 ****
--- 2592,2607 ----
      __ bind(not_null);
    } else {
      __ br_null(value, false, Assembler::pn, done);
      __ delayed()->nop();
    }
!   load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
!   load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
!   add_debug_info_for_null_check_here(op->info_for_exception());
!   __ load_klass(array, k_RInfo);
+   __ load_klass(value, klass_RInfo);
    // get instance klass
!   load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
!   __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
    // perform the fast part of the checking logic
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
    // call out-of-line instance of __ check_klass_subtype_slow_path(...):
    assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
*** 2698,2708 ****
--- 2619,2629 ----
      jobject2reg(md->constant_encoding(), mdo);
      if (mdo_offset_bias > 0) {
        __ set(mdo_offset_bias, tmp1);
        __ add(mdo, tmp1, mdo);
      }
!     load(Address(value, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
!     __ load_klass(value, recv);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ ba(false, done);
      __ delayed()->nop();
      // Cast failure case
      __ bind(profile_cast_failure);
*** 2779,2794 ****
--- 2700,2718 ----
      Register new_value = op->new_value()->as_register();
      Register t1 = op->tmp1()->as_register();
      Register t2 = op->tmp2()->as_register();
      __ mov(cmp_value, t1);
      __ mov(new_value, t2);
  #ifdef _LP64
      if (op->code() == lir_cas_obj) {
+       if (UseCompressedOops) {
+         __ encode_heap_oop(t1);
+         __ encode_heap_oop(t2);
+         __ cas(addr, t1, t2);
+       } else {
        __ casx(addr, t1, t2);
      } else
  #endif
      {
+       }
+     } else {
        __ cas(addr, t1, t2);
      }
      __ cmp(t1, t2);
    } else {
      Unimplemented();
*** 2964,2974 ****
--- 2888,2898 ----
          __ st_ptr(tmp1, data_addr);
          return;
        }
      }
    } else {
!     load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
!     __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld_ptr(counter_addr, tmp1);
*** 3158,3168 ****
--- 3082,3092 ----
      }
    } else {
      // use normal move for all other volatiles since they don't need
      // special handling to remain atomic.
!     move_op(src, dest, type, lir_patch_none, info, false, false);
!     move_op(src, dest, type, lir_patch_none, info, false, false, false);
    }
  }
  
  void LIR_Assembler::membar() {
    // only StoreLoad membars are ever explicitly needed on sparcs in TSO mode
