src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp

rev 3419 : 7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
Summary: use shorter instruction sequences for atomic add and atomic exchange when possible.
Reviewed-by:
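
For context, a minimal Java-level sketch of the two operations the summary refers to: getAndIncrement() is an atomic add and getAndSet() is an atomic exchange. The Counter class, its hits field, and the method names below are invented for illustration; only the java.util.concurrent.atomic APIs are real, and whether a given call is intrinsified depends on the library routing it through the corresponding Unsafe operation.

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.concurrent.atomic.AtomicLongFieldUpdater;

    // Illustrative class and field names only; the j.u.c.atomic APIs are real.
    class Counter {
        // Field driven through a reflective updater, as in the bug title.
        volatile long hits;
        private static final AtomicLongFieldUpdater<Counter> HITS =
                AtomicLongFieldUpdater.newUpdater(Counter.class, "hits");

        private final AtomicInteger state = new AtomicInteger();

        long bump() {
            return HITS.getAndIncrement(this);   // atomic add
        }

        int swapState(int next) {
            return state.getAndSet(next);        // atomic exchange
        }
    }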


1186       if (is_obj) {
1187         // This address is precise
1188         post_barrier(LIR_OprFact::address(addr), data);
1189       }
1190     }
1191 }
1192 
1193 
1194 void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
1195                                      BasicType type, bool is_volatile) {
1196 #ifndef _LP64
1197   if (is_volatile && type == T_LONG) {
1198     __ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
1199   } else
1200 #endif
1201     {
1202     LIR_Address* addr = new LIR_Address(src, offset, type);
1203     __ load(addr, dst);
1204   }
1205 }
1206 
1207 void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
1208   BasicType type = x->basic_type();
1209   LIRItem src(x->object(), this);
1210   LIRItem off(x->offset(), this);
1211   LIRItem value(x->value(), this);
1212 
1213   src.load_item();
1214   value.load_item();
1215   off.load_nonconstant();
1216 
1217   LIR_Opr dst = rlock_result(x, type);
1218   LIR_Opr data = value.result();
1219   bool is_obj = (type == T_ARRAY || type == T_OBJECT);
1220   LIR_Opr offset = off.result();
1221 
1222   if (data != dst) {
1223     __ move(data, dst);
1224     data = dst;
1225   }
1226 
1227   assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type");
1228   LIR_Address* addr;
1229   if (offset->is_constant()) {
1230     
1231 #ifdef _LP64
1232     jlong l = offset->as_jlong();
1233     assert((jlong)((jint)l) == l, "offset too large for constant");
1234     jint c = (jint)l;
1235 #else
1236     jint c = offset->as_jint();
1237 #endif
1238     addr = new LIR_Address(src.result(), c, type);
1239   } else {
1240     addr = new LIR_Address(src.result(), offset, type);
1241   }
1242 
1243   LIR_Opr tmp = LIR_OprFact::illegalOpr;
1244   LIR_Opr ptr = LIR_OprFact::illegalOpr;
1245 
1246   if (is_obj) {
1247     // Do the pre-write barrier, if any.
1248     // barriers on sparc don't work with a base + index address
1249     tmp = FrameMap::G3_opr;
1250     ptr = new_pointer_register();
1251     __ add(src.result(), off.result(), ptr);
1252     pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
1253                 true /* do_load */, false /* patch */, NULL);
1254   }
1255   __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
1256   if (is_obj) {
1257     // Seems to be a precise address
1258     post_barrier(ptr, data);
1259   }
1260 }
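
For reference, a hedged sketch of the object-exchange case that the is_obj branches above serve: atomically exchanging a reference field, which is why the pre- and post-write barriers are emitted around the xchg. The Slot class is invented for illustration; AtomicReference is the real API, and reaching do_UnsafeGetAndSetObject assumes the library implements getAndSet via the Unsafe get-and-set operation (and, per the assert above, UseCompressedOops on 64-bit SPARC for reference values).

    import java.util.concurrent.atomic.AtomicReference;

    // Illustrative only: an atomic exchange of an object reference.
    class Slot {
        private final AtomicReference<Object> current = new AtomicReference<>();

        // Swap in a new value and hand back the previous one; when C1 intrinsifies
        // this, the reference store is what requires the GC write barriers.
        Object publish(Object next) {
            return current.getAndSet(next);
        }
    }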