995 else __ fst_d (dst_addr);
996
997 } else {
998 ShouldNotReachHere();
999 }
1000 }
1001
1002
1003 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
1004 LIR_Address* to_addr = dest->as_address_ptr();
1005 PatchingStub* patch = NULL;
1006 Register compressed_src = rscratch1;
1007
1008 if (type == T_ARRAY || type == T_OBJECT) {
1009 __ verify_oop(src->as_register());
1010 #ifdef _LP64
1011 if (UseCompressedOops && !wide) {
1012 __ movptr(compressed_src, src->as_register());
1013 __ encode_heap_oop(compressed_src);
1014 if (patch_code != lir_patch_none) {
1015 info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
1016 }
1017 }
1018 #endif
1019 }
1020
1021 if (patch_code != lir_patch_none) {
1022 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1023 Address toa = as_Address(to_addr);
1024 assert(toa.disp() != 0, "must have");
1025 }
1026
1027 int null_check_here = code_offset();
1028 switch (type) {
1029 case T_FLOAT: {
1030 if (src->is_single_xmm()) {
1031 __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
1032 } else {
1033 assert(src->is_single_fpu(), "must be");
1034 assert(src->fpu_regnr() == 0, "argument must be on TOS");
1035 if (pop_fpu_stack) __ fstp_s(as_Address(to_addr));
1095 }
1096 __ movl(as_Address_lo(to_addr), from_lo);
1097 } else {
1098 assert(index == noreg || (index != base && index != from_lo), "can't handle this");
1099 __ movl(as_Address_lo(to_addr), from_lo);
1100 if (patch != NULL) {
1101 patching_epilog(patch, lir_patch_low, base, info);
1102 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1103 patch_code = lir_patch_high;
1104 }
1105 __ movl(as_Address_hi(to_addr), from_hi);
1106 }
1107 #endif // _LP64
1108 break;
1109 }
1110
1111 case T_BYTE: // fall through
1112 case T_BOOLEAN: {
1113 Register src_reg = src->as_register();
1114 Address dst_addr = as_Address(to_addr);
1115 assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");
1116 __ movb(dst_addr, src_reg);
1117 break;
1118 }
1119
1120 case T_CHAR: // fall through
1121 case T_SHORT:
1122 __ movw(as_Address(to_addr), src->as_register());
1123 break;
1124
1125 default:
1126 ShouldNotReachHere();
1127 }
1128 if (info != NULL) {
1129 add_debug_info_for_null_check(null_check_here, info);
1130 }
1131
1132 if (patch_code != lir_patch_none) {
1133 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1134 }
1135 }
1321 patch_code = lir_patch_low;
1322 }
1323 __ movl(to_lo, as_Address_lo(addr));
1324 } else {
1325 assert(index == noreg || (index != base && index != to_lo), "can't handle this");
1326 __ movl(to_lo, as_Address_lo(addr));
1327 if (patch != NULL) {
1328 patching_epilog(patch, lir_patch_low, base, info);
1329 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1330 patch_code = lir_patch_high;
1331 }
1332 __ movl(to_hi, as_Address_hi(addr));
1333 }
1334 #endif // _LP64
1335 break;
1336 }
1337
1338 case T_BOOLEAN: // fall through
1339 case T_BYTE: {
1340 Register dest_reg = dest->as_register();
1341 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1342 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1343 __ movsbl(dest_reg, from_addr);
1344 } else {
1345 __ movb(dest_reg, from_addr);
1346 __ shll(dest_reg, 24);
1347 __ sarl(dest_reg, 24);
1348 }
1349 break;
1350 }
1351
1352 case T_CHAR: {
1353 Register dest_reg = dest->as_register();
1354 assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
1355 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1356 __ movzwl(dest_reg, from_addr);
1357 } else {
1358 __ movw(dest_reg, from_addr);
1359 }
1360 break;
1361 }
1362
1363 case T_SHORT: {
1364 Register dest_reg = dest->as_register();
1365 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1366 __ movswl(dest_reg, from_addr);
1367 } else {
1368 __ movw(dest_reg, from_addr);
1369 __ shll(dest_reg, 16);
1370 __ sarl(dest_reg, 16);
1371 }
1372 break;
1373 }
1374
1971
1972
1973 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {  // atomic CAS for lir_cas_int / lir_cas_obj / lir_cas_long
1974   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1975     // 32-bit-only path (disabled on LP64): 64-bit CAS via cmpxchg8b, which pins
1975     // the compare value in edx:eax and the new value in ecx:ebx.
1975     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1976     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1977     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1978     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1979     Register addr = op->addr()->as_register();
1980     if (os::is_MP()) {
1981       __ lock();  // lock prefix makes the cmpxchg atomic on multiprocessor systems
1982     }
1983     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1984
1985   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1986     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1987     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1988     Register newval = op->new_value()->as_register();
1989     Register cmpval = op->cmp_value()->as_register();
1990     assert(cmpval == rax, "wrong register");  // cmpxchg implicitly compares against rax
1991     assert(newval != NULL, "new val must be register");
1992     assert(cmpval != newval, "cmp and new values must be in different registers");
1993     assert(cmpval != addr, "cmp and addr must be in different registers");
1994     assert(newval != addr, "new value and addr must be in different registers");
1995
1996     if ( op->code() == lir_cas_obj) {
1997 #ifdef _LP64
1998       if (UseCompressedOops) {
1999         __ encode_heap_oop(cmpval);
2000         __ mov(rscratch1, newval);  // encode a copy so the caller's newval register is left intact
2001         __ encode_heap_oop(rscratch1);
2002         if (os::is_MP()) {
2003           __ lock();
2004         }
2005         // cmpval (rax) is implicitly used by this instruction
2006         __ cmpxchgl(rscratch1, Address(addr, 0));
2007       } else
2008 #endif
2009       {
2010         if (os::is_MP()) {
2011           __ lock();
2012         }
2013         __ cmpxchgptr(newval, Address(addr, 0));
2014       }
2015     } else {
2016       assert(op->code() == lir_cas_int, "lir_cas_int expected");
2017       if (os::is_MP()) {
2018         __ lock();
2019       }
2020       __ cmpxchgl(newval, Address(addr, 0));
2021     }
2022 #ifdef _LP64
2023   } else if (op->code() == lir_cas_long) {
2024     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2025     Register newval = op->new_value()->as_register_lo();
2026     Register cmpval = op->cmp_value()->as_register_lo();
2027     assert(cmpval == rax, "wrong register");
2028     assert(newval != NULL, "new val must be register");
2029     assert(cmpval != newval, "cmp and new values must be in different registers");
2030     assert(cmpval != addr, "cmp and addr must be in different registers");
2031     assert(newval != addr, "new value and addr must be in different registers");
2032     if (os::is_MP()) {
2033       __ lock();
2034     }
2035     __ cmpxchgq(newval, Address(addr, 0));  // 64-bit CAS, LP64 only
2036 #endif // _LP64
2037   } else {
2038     Unimplemented();
2039   }
2040 }
2041
2042 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2043 Assembler::Condition acond, ncond;
2044 switch (condition) {
2045 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;
2046 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;
2047 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;
2048 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;
|
995 else __ fst_d (dst_addr);
996
997 } else {
998 ShouldNotReachHere();
999 }
1000 }
1001
1002
1003 void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
1004 LIR_Address* to_addr = dest->as_address_ptr();
1005 PatchingStub* patch = NULL;
1006 Register compressed_src = rscratch1;
1007
1008 if (type == T_ARRAY || type == T_OBJECT) {
1009 __ verify_oop(src->as_register());
1010 #ifdef _LP64
1011 if (UseCompressedOops && !wide) {
1012 __ movptr(compressed_src, src->as_register());
1013 __ encode_heap_oop(compressed_src);
1014       if (patch_code != lir_patch_none) {
1015         info->oop_map()->set_narrowoop(compressed_src->as_VMReg());  // Register is RegisterImpl*: member access needs '->', not '.'
1016       }
1017 }
1018 #endif
1019 }
1020
1021 if (patch_code != lir_patch_none) {
1022 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1023 Address toa = as_Address(to_addr);
1024 assert(toa.disp() != 0, "must have");
1025 }
1026
1027 int null_check_here = code_offset();
1028 switch (type) {
1029 case T_FLOAT: {
1030 if (src->is_single_xmm()) {
1031 __ movflt(as_Address(to_addr), src->as_xmm_float_reg());
1032 } else {
1033 assert(src->is_single_fpu(), "must be");
1034 assert(src->fpu_regnr() == 0, "argument must be on TOS");
1035 if (pop_fpu_stack) __ fstp_s(as_Address(to_addr));
1095 }
1096 __ movl(as_Address_lo(to_addr), from_lo);
1097 } else {
1098 assert(index == noreg || (index != base && index != from_lo), "can't handle this");
1099 __ movl(as_Address_lo(to_addr), from_lo);
1100 if (patch != NULL) {
1101 patching_epilog(patch, lir_patch_low, base, info);
1102 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1103 patch_code = lir_patch_high;
1104 }
1105 __ movl(as_Address_hi(to_addr), from_hi);
1106 }
1107 #endif // _LP64
1108 break;
1109 }
1110
1111     case T_BYTE: // fall through
1112     case T_BOOLEAN: {
1113       Register src_reg = src->as_register();
1114       Address dst_addr = as_Address(to_addr);
1115       assert(VM_Version::is_P6() || src_reg->has_byte_register(), "must use byte registers if not P6");  // '->' fix: Register is a pointer type
1116       __ movb(dst_addr, src_reg);
1117       break;
1118     }
1119
1120 case T_CHAR: // fall through
1121 case T_SHORT:
1122 __ movw(as_Address(to_addr), src->as_register());
1123 break;
1124
1125 default:
1126 ShouldNotReachHere();
1127 }
1128 if (info != NULL) {
1129 add_debug_info_for_null_check(null_check_here, info);
1130 }
1131
1132 if (patch_code != lir_patch_none) {
1133 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
1134 }
1135 }
1321 patch_code = lir_patch_low;
1322 }
1323 __ movl(to_lo, as_Address_lo(addr));
1324 } else {
1325 assert(index == noreg || (index != base && index != to_lo), "can't handle this");
1326 __ movl(to_lo, as_Address_lo(addr));
1327 if (patch != NULL) {
1328 patching_epilog(patch, lir_patch_low, base, info);
1329 patch = new PatchingStub(_masm, PatchingStub::access_field_id);
1330 patch_code = lir_patch_high;
1331 }
1332 __ movl(to_hi, as_Address_hi(addr));
1333 }
1334 #endif // _LP64
1335 break;
1336 }
1337
1338     case T_BOOLEAN: // fall through
1339     case T_BYTE: {
1340       Register dest_reg = dest->as_register();
1341       assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");  // '->' fix: Register is a pointer type
1342       if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1343         __ movsbl(dest_reg, from_addr);
1344       } else {
1345         __ movb(dest_reg, from_addr);
1346         __ shll(dest_reg, 24);  // shift pair sign-extends the loaded byte when movsbl can't be used
1347         __ sarl(dest_reg, 24);
1348       }
1349       break;
1350     }
1351
1352     case T_CHAR: {
1353       Register dest_reg = dest->as_register();
1354       assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");  // '->' fix: Register is a pointer type
1355       if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1356         __ movzwl(dest_reg, from_addr);  // zero-extending 16-bit load
1357       } else {
1358         __ movw(dest_reg, from_addr);
1359       }
1360       break;
1361     }
1362
1363 case T_SHORT: {
1364 Register dest_reg = dest->as_register();
1365 if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
1366 __ movswl(dest_reg, from_addr);
1367 } else {
1368 __ movw(dest_reg, from_addr);
1369 __ shll(dest_reg, 16);
1370 __ sarl(dest_reg, 16);
1371 }
1372 break;
1373 }
1374
1971
1972
1973 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {  // atomic CAS for lir_cas_int / lir_cas_obj / lir_cas_long
1974   if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
1975     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
1976     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
1977     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
1978     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
1979     Register addr = op->addr()->as_register();
1980     if (os::is_MP()) {
1981       __ lock();  // lock prefix makes the cmpxchg atomic on multiprocessor systems
1982     }
1983     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
1984
1985   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
1986     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
1987     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
1988     Register newval = op->new_value()->as_register();
1989     Register cmpval = op->cmp_value()->as_register();
1990     assert(cmpval == rax, "wrong register");  // cmpxchg implicitly compares against rax
1991     assert(newval != NULL, "new val must be register");  // restored NULL: Register is RegisterImpl* here; matches the duplicate definition above
1992     assert(cmpval != newval, "cmp and new values must be in different registers");
1993     assert(cmpval != addr, "cmp and addr must be in different registers");
1994     assert(newval != addr, "new value and addr must be in different registers");
1995
1996     if ( op->code() == lir_cas_obj) {
1997 #ifdef _LP64
1998       if (UseCompressedOops) {
1999         __ encode_heap_oop(cmpval);
2000         __ mov(rscratch1, newval);  // encode a copy so the caller's newval register is left intact
2001         __ encode_heap_oop(rscratch1);
2002         if (os::is_MP()) {
2003           __ lock();
2004         }
2005         // cmpval (rax) is implicitly used by this instruction
2006         __ cmpxchgl(rscratch1, Address(addr, 0));
2007       } else
2008 #endif
2009       {
2010         if (os::is_MP()) {
2011           __ lock();
2012         }
2013         __ cmpxchgptr(newval, Address(addr, 0));
2014       }
2015     } else {
2016       assert(op->code() == lir_cas_int, "lir_cas_int expected");
2017       if (os::is_MP()) {
2018         __ lock();
2019       }
2020       __ cmpxchgl(newval, Address(addr, 0));
2021     }
2022 #ifdef _LP64
2023   } else if (op->code() == lir_cas_long) {
2024     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
2025     Register newval = op->new_value()->as_register_lo();
2026     Register cmpval = op->cmp_value()->as_register_lo();
2027     assert(cmpval == rax, "wrong register");
2028     assert(newval != NULL, "new val must be register");  // restored NULL for consistency with the lir_cas_int branch and the duplicate definition
2029     assert(cmpval != newval, "cmp and new values must be in different registers");
2030     assert(cmpval != addr, "cmp and addr must be in different registers");
2031     assert(newval != addr, "new value and addr must be in different registers");
2032     if (os::is_MP()) {
2033       __ lock();
2034     }
2035     __ cmpxchgq(newval, Address(addr, 0));  // 64-bit CAS, LP64 only
2036 #endif // _LP64
2037   } else {
2038     Unimplemented();
2039   }
2040 }
2041
2042 void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
2043 Assembler::Condition acond, ncond;
2044 switch (condition) {
2045 case lir_cond_equal: acond = Assembler::equal; ncond = Assembler::notEqual; break;
2046 case lir_cond_notEqual: acond = Assembler::notEqual; ncond = Assembler::equal; break;
2047 case lir_cond_less: acond = Assembler::less; ncond = Assembler::greaterEqual; break;
2048 case lir_cond_lessEqual: acond = Assembler::lessEqual; ncond = Assembler::greater; break;
|