src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp

rev 54670 : Port of valuetypes to aarch64

Old version:
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"
  36 #include "ci/ciObjArrayKlass.hpp"
  37 #include "ci/ciTypeArrayKlass.hpp"
  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "vmreg_aarch64.inline.hpp"
  41 
  42 #ifdef ASSERT
  43 #define __ gen()->lir(__FILE__, __LINE__)->
  44 #else
  45 #define __ gen()->lir()->
  46 #endif
  47 
  48 // Item will be loaded into a byte register; Intel only
  49 void LIRItem::load_byte_item() {
  50   load_item();
  51 }
  52 
  53 
  54 void LIRItem::load_nonconstant() {
  55   LIR_Opr r = value()->operand();
  56   if (r->is_constant()) {
  57     _result = r;
 300     __ shift_left(left, exact_log2(c + 1), tmp);
 301     __ sub(tmp, left, result);
 302     return true;
 303   } else {
 304     return false;
 305   }
 306 }
 307 
 308 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
 309   BasicType type = item->type();
 310   __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
 311 }
 312 
 313 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 314     LIR_Opr tmp1 = new_register(objectType);
 315     LIR_Opr tmp2 = new_register(objectType);
 316     LIR_Opr tmp3 = new_register(objectType);
 317     __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 318 }
 319 
 320 //----------------------------------------------------------------------
 321 //             visitor functions
 322 //----------------------------------------------------------------------
 323 
 324 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 325   assert(x->is_pinned(),"");
 326   LIRItem obj(x->obj(), this);
 327   obj.load_item();
 328 
 329   set_no_result(x);
 330 
 331   // "lock" stores the address of the monitor stack slot, so this is not an oop
 332   LIR_Opr lock = new_register(T_INT);
 333   // Need a scratch register for biased locking
 334   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 335   if (UseBiasedLocking) {
 336     scratch = new_register(T_INT);
 337   }
 338 
 339   CodeEmitInfo* info_for_exception = NULL;
 340   if (x->needs_null_check()) {
 341     info_for_exception = state_for(x);
 342   }
 343   // this CodeEmitInfo must not have the xhandlers because here the
 344   // object is already locked (xhandlers expect object to be unlocked)
 345   CodeEmitInfo* info = state_for(x, x->state(), true);
 346   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 347                         x->monitor_no(), info_for_exception, info);
 348 }
 349 
 350 
 351 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 352   assert(x->is_pinned(),"");
 353 
 354   LIRItem obj(x->obj(), this);
 355   obj.dont_load_item();
 356 
 357   LIR_Opr lock = new_register(T_INT);
 358   LIR_Opr obj_temp = new_register(T_INT);
 359   set_no_result(x);
 360   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 361 }
 362 
 363 
 364 void LIRGenerator::do_NegateOp(NegateOp* x) {
 365 
 366   LIRItem from(x->x(), this);
 367   from.load_item();


1136 }
1137 
1138 void LIRGenerator::do_NewInstance(NewInstance* x) {
1139 #ifndef PRODUCT
1140   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1141     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1142   }
1143 #endif
1144   CodeEmitInfo* info = state_for(x, x->state());
1145   LIR_Opr reg = result_register_for(x->type());
1146   new_instance(reg, x->klass(), x->is_unresolved(),
1147                        FrameMap::r2_oop_opr,
1148                        FrameMap::r5_oop_opr,
1149                        FrameMap::r4_oop_opr,
1150                        LIR_OprFact::illegalOpr,
1151                        FrameMap::r3_metadata_opr, info);
1152   LIR_Opr result = rlock_result(x);
1153   __ move(reg, result);
1154 }
1155 
1156 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1157   CodeEmitInfo* info = state_for(x, x->state());
1158 
1159   LIRItem length(x->length(), this);
1160   length.load_item_force(FrameMap::r19_opr);
1161 
1162   LIR_Opr reg = result_register_for(x->type());
1163   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1164   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1165   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1166   LIR_Opr tmp4 = reg;
1167   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1168   LIR_Opr len = length.result();
1169   BasicType elem_type = x->elt_type();
1170 
1171   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1172 
1173   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1174   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
1175 


1181   LIRItem length(x->length(), this);
1182   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1183   // and therefore provide the state before the parameters have been consumed
1184   CodeEmitInfo* patching_info = NULL;
1185   if (!x->klass()->is_loaded() || PatchALot) {
1186     patching_info =  state_for(x, x->state_before());
1187   }
1188 
1189   CodeEmitInfo* info = state_for(x, x->state());
1190 
1191   LIR_Opr reg = result_register_for(x->type());
1192   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1193   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1194   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1195   LIR_Opr tmp4 = reg;
1196   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1197 
1198   length.load_item_force(FrameMap::r19_opr);
1199   LIR_Opr len = length.result();
1200 
1201   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1202   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1203   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1204     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1205   }
1206   klass2reg_with_patching(klass_reg, obj, patching_info);
1207   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1208 
1209   LIR_Opr result = rlock_result(x);
1210   __ move(reg, result);
1211 }
1212 
1213 
1214 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1215   Values* dims = x->dims();
1216   int i = dims->length();
1217   LIRItemList* items = new LIRItemList(i, i, NULL);
1218   while (i-- > 0) {
1219     LIRItem* size = new LIRItem(dims->at(i), this);
1220     items->at_put(i, size);
1221   }
1222 
1223   // Evaluate state_for early since it may emit code.
1224   CodeEmitInfo* patching_info = NULL;
1225   if (!x->klass()->is_loaded() || PatchALot) {
1226     patching_info = state_for(x, x->state_before());
1227 


1281       (x->needs_exception_state() ? state_for(x) :
1282                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1283 
1284   CodeStub* stub;
1285   if (x->is_incompatible_class_change_check()) {
1286     assert(patching_info == NULL, "can't patch this");
1287     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1288   } else if (x->is_invokespecial_receiver_check()) {
1289     assert(patching_info == NULL, "can't patch this");
1290     stub = new DeoptimizeStub(info_for_exception,
1291                               Deoptimization::Reason_class_check,
1292                               Deoptimization::Action_none);
1293   } else {
1294     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1295   }
1296   LIR_Opr reg = rlock_result(x);
1297   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1298   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1299     tmp3 = new_register(objectType);
1300   }
1301   __ checkcast(reg, obj.result(), x->klass(),
1302                new_register(objectType), new_register(objectType), tmp3,
1303                x->direct_compare(), info_for_exception, patching_info, stub,
1304                x->profiled_method(), x->profiled_bci());
1305 }
1306 
1307 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1308   LIRItem obj(x->obj(), this);
1309 
1310   // result and test object may not be in same register
1311   LIR_Opr reg = rlock_result(x);
1312   CodeEmitInfo* patching_info = NULL;
1313   if ((!x->klass()->is_loaded() || PatchALot)) {
1314     // must do this before locking the destination register as an oop register
1315     patching_info = state_for(x, x->state_before());
1316   }
1317   obj.load_item();
1318   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1319   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1320     tmp3 = new_register(objectType);
1321   }
1322   __ instanceof(reg, obj.result(), x->klass(),
1323                 new_register(objectType), new_register(objectType), tmp3,
1324                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

New version (rev 54670, port of valuetypes to aarch64):
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"
  36 #include "ci/ciObjArrayKlass.hpp"
  37 #include "ci/ciTypeArrayKlass.hpp"
  38 #include "ci/ciValueKlass.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "vmreg_aarch64.inline.hpp"
  42 
  43 #ifdef ASSERT
  44 #define __ gen()->lir(__FILE__, __LINE__)->
  45 #else
  46 #define __ gen()->lir()->
  47 #endif
  48 
  49 // Item will be loaded into a byte register; Intel only
  50 void LIRItem::load_byte_item() {
  51   load_item();
  52 }
  53 
  54 
  55 void LIRItem::load_nonconstant() {
  56   LIR_Opr r = value()->operand();
  57   if (r->is_constant()) {
  58     _result = r;


 301     __ shift_left(left, exact_log2(c + 1), tmp);
 302     __ sub(tmp, left, result);
 303     return true;
 304   } else {
 305     return false;
 306   }
 307 }
 308 
 309 void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
 310   BasicType type = item->type();
 311   __ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
 312 }
 313 
 314 void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
 315     LIR_Opr tmp1 = new_register(objectType);
 316     LIR_Opr tmp2 = new_register(objectType);
 317     LIR_Opr tmp3 = new_register(objectType);
 318     __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 319 }
 320 
 321 void LIRGenerator::flattened_array_store_check(LIR_Opr value, ciKlass* element_klass, CodeEmitInfo* store_check_info) {
 322   LIR_Opr tmp1 = new_register(T_METADATA);
 323   LIR_Opr tmp2 = new_register(T_METADATA);
 324 
 325   __ metadata2reg(element_klass->constant_encoding(), tmp2);
 326   __ flattened_store_check(value, element_klass, tmp1, tmp2, store_check_info);
 327 }
 328 
 329 
 330 //----------------------------------------------------------------------
 331 //             visitor functions
 332 //----------------------------------------------------------------------
 333 
 334 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 335   assert(x->is_pinned(),"");
 336   LIRItem obj(x->obj(), this);
 337   obj.load_item();
 338 
 339   set_no_result(x);
 340 
 341   // "lock" stores the address of the monitor stack slot, so this is not an oop
 342   LIR_Opr lock = new_register(T_INT);
 343   // Need a scratch register for biased locking
 344   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 345   if (UseBiasedLocking || x->maybe_valuetype()) {
 346     scratch = new_register(T_INT);
 347   }
 348 
 349   CodeEmitInfo* info_for_exception = NULL;
 350   if (x->needs_null_check()) {
 351     info_for_exception = state_for(x);
 352   }
 353 
 354   CodeStub* throw_imse_stub = 
 355       x->maybe_valuetype() ?
 356       new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
 357       NULL;
 358 
 359   // this CodeEmitInfo must not have the xhandlers because here the
 360   // object is already locked (xhandlers expect object to be unlocked)
 361   CodeEmitInfo* info = state_for(x, x->state(), true);
 362   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 363                         x->monitor_no(), info_for_exception, info, throw_imse_stub);
 364 }
 365 
 366 
 367 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 368   assert(x->is_pinned(),"");
 369 
 370   LIRItem obj(x->obj(), this);
 371   obj.dont_load_item();
 372 
 373   LIR_Opr lock = new_register(T_INT);
 374   LIR_Opr obj_temp = new_register(T_INT);
 375   set_no_result(x);
 376   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 377 }
 378 
 379 
 380 void LIRGenerator::do_NegateOp(NegateOp* x) {
 381 
 382   LIRItem from(x->x(), this);
 383   from.load_item();


1152 }
1153 
1154 void LIRGenerator::do_NewInstance(NewInstance* x) {
1155 #ifndef PRODUCT
1156   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1157     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1158   }
1159 #endif
1160   CodeEmitInfo* info = state_for(x, x->state());
1161   LIR_Opr reg = result_register_for(x->type());
1162   new_instance(reg, x->klass(), x->is_unresolved(),
1163                        FrameMap::r2_oop_opr,
1164                        FrameMap::r5_oop_opr,
1165                        FrameMap::r4_oop_opr,
1166                        LIR_OprFact::illegalOpr,
1167                        FrameMap::r3_metadata_opr, info);
1168   LIR_Opr result = rlock_result(x);
1169   __ move(reg, result);
1170 }
1171 
1172 void LIRGenerator::do_NewValueTypeInstance  (NewValueTypeInstance* x) {
1173   // Mapping to do_NewInstance (same code)
1174   CodeEmitInfo* info = state_for(x, x->state());
1175   x->set_to_object_type();
1176   LIR_Opr reg = result_register_for(x->type());
1177   new_instance(reg, x->klass(), x->is_unresolved(),
1178              FrameMap::r2_oop_opr,
1179              FrameMap::r5_oop_opr,
1180              FrameMap::r4_oop_opr,
1181              LIR_OprFact::illegalOpr,
1182              FrameMap::r3_metadata_opr, info);
1183   LIR_Opr result = rlock_result(x);
1184   __ move(reg, result);
1185 
1186 }
1187 
1188 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1189   CodeEmitInfo* info = state_for(x, x->state());
1190 
1191   LIRItem length(x->length(), this);
1192   length.load_item_force(FrameMap::r19_opr);
1193 
1194   LIR_Opr reg = result_register_for(x->type());
1195   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1196   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1197   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1198   LIR_Opr tmp4 = reg;
1199   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1200   LIR_Opr len = length.result();
1201   BasicType elem_type = x->elt_type();
1202 
1203   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1204 
1205   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1206   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
1207 


1213   LIRItem length(x->length(), this);
1214   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1215   // and therefore provide the state before the parameters have been consumed
1216   CodeEmitInfo* patching_info = NULL;
1217   if (!x->klass()->is_loaded() || PatchALot) {
1218     patching_info =  state_for(x, x->state_before());
1219   }
1220 
1221   CodeEmitInfo* info = state_for(x, x->state());
1222 
1223   LIR_Opr reg = result_register_for(x->type());
1224   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1225   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1226   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1227   LIR_Opr tmp4 = reg;
1228   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1229 
1230   length.load_item_force(FrameMap::r19_opr);
1231   LIR_Opr len = length.result();
1232 
1233   // DMS CHECK: Should we allocate slow path after BAILOUT?
1234   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, false); 
1235 
1236   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1237   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1238     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1239   }
1240   klass2reg_with_patching(klass_reg, obj, patching_info);
1241 
1242   if (obj->is_value_array_klass()) {
1243     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_VALUETYPE, klass_reg, slow_path);
1244   } else {
1245     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1246   }
1247 
1248   LIR_Opr result = rlock_result(x);
1249   __ move(reg, result);
1250 }
1251 
1252 
1253 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1254   Values* dims = x->dims();
1255   int i = dims->length();
1256   LIRItemList* items = new LIRItemList(i, i, NULL);
1257   while (i-- > 0) {
1258     LIRItem* size = new LIRItem(dims->at(i), this);
1259     items->at_put(i, size);
1260   }
1261 
1262   // Evaluate state_for early since it may emit code.
1263   CodeEmitInfo* patching_info = NULL;
1264   if (!x->klass()->is_loaded() || PatchALot) {
1265     patching_info = state_for(x, x->state_before());
1266 


1320       (x->needs_exception_state() ? state_for(x) :
1321                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1322 
1323   CodeStub* stub;
1324   if (x->is_incompatible_class_change_check()) {
1325     assert(patching_info == NULL, "can't patch this");
1326     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1327   } else if (x->is_invokespecial_receiver_check()) {
1328     assert(patching_info == NULL, "can't patch this");
1329     stub = new DeoptimizeStub(info_for_exception,
1330                               Deoptimization::Reason_class_check,
1331                               Deoptimization::Action_none);
1332   } else {
1333     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1334   }
1335   LIR_Opr reg = rlock_result(x);
1336   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1337   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1338     tmp3 = new_register(objectType);
1339   }
1340 
1341 
1342   __ checkcast(reg, obj.result(), x->klass(),
1343                new_register(objectType), new_register(objectType), tmp3,
1344                x->direct_compare(), info_for_exception, patching_info, stub,
1345                x->profiled_method(), x->profiled_bci(), x->is_never_null());
1346 
1347 }
1348 
1349 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1350   LIRItem obj(x->obj(), this);
1351 
1352   // result and test object may not be in same register
1353   LIR_Opr reg = rlock_result(x);
1354   CodeEmitInfo* patching_info = NULL;
1355   if ((!x->klass()->is_loaded() || PatchALot)) {
1356     // must do this before locking the destination register as an oop register
1357     patching_info = state_for(x, x->state_before());
1358   }
1359   obj.load_item();
1360   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1361   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1362     tmp3 = new_register(objectType);
1363   }
1364   __ instanceof(reg, obj.result(), x->klass(),
1365                 new_register(objectType), new_register(objectType), tmp3,
1366                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());