
src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp

rev 53735 : AArch64 support for ValueTypes


  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"
  36 #include "ci/ciObjArrayKlass.hpp"
  37 #include "ci/ciTypeArrayKlass.hpp"

  38 #include "runtime/sharedRuntime.hpp"
  39 #include "runtime/stubRoutines.hpp"
  40 #include "vmreg_aarch64.inline.hpp"
  41 
  42 #ifdef ASSERT
  43 #define __ gen()->lir(__FILE__, __LINE__)->
  44 #else
  45 #define __ gen()->lir()->
  46 #endif
  47 
  48 // Item will be loaded into a byte register; Intel only
  49 void LIRItem::load_byte_item() {
  50   load_item();
  51 }
  52 
  53 
  54 void LIRItem::load_nonconstant() {
  55   LIR_Opr r = value()->operand();
  56   if (r->is_constant()) {
  57     _result = r;


 315     LIR_Opr tmp2 = new_register(objectType);
 316     LIR_Opr tmp3 = new_register(objectType);
 317     __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 318 }
 319 
 320 //----------------------------------------------------------------------
 321 //             visitor functions
 322 //----------------------------------------------------------------------
 323 
 324 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 325   assert(x->is_pinned(),"");
 326   LIRItem obj(x->obj(), this);
 327   obj.load_item();
 328 
 329   set_no_result(x);
 330 
 331   // "lock" stores the address of the monitor stack slot, so this is not an oop
 332   LIR_Opr lock = new_register(T_INT);
 333   // Need a scratch register for biased locking
 334   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 335   if (UseBiasedLocking) {
 336     scratch = new_register(T_INT);
 337   }
 338 
 339   CodeEmitInfo* info_for_exception = NULL;
 340   if (x->needs_null_check()) {
 341     info_for_exception = state_for(x);
 342   }
 343   // this CodeEmitInfo must not have the xhandlers because here the
 344   // object is already locked (xhandlers expect object to be unlocked)
 345   CodeEmitInfo* info = state_for(x, x->state(), true);
 346   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 347                         x->monitor_no(), info_for_exception, info);
 348 }
 349 
 350 
 351 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 352   assert(x->is_pinned(),"");
 353 
 354   LIRItem obj(x->obj(), this);
 355   obj.dont_load_item();
 356 
 357   LIR_Opr lock = new_register(T_INT);
 358   LIR_Opr obj_temp = new_register(T_INT);
 359   set_no_result(x);
 360   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 361 }
 362 
 363 
 364 void LIRGenerator::do_NegateOp(NegateOp* x) {
 365 
 366   LIRItem from(x->x(), this);
 367   from.load_item();


1136 }
1137 
1138 void LIRGenerator::do_NewInstance(NewInstance* x) {
1139 #ifndef PRODUCT
1140   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1141     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1142   }
1143 #endif
1144   CodeEmitInfo* info = state_for(x, x->state());
1145   LIR_Opr reg = result_register_for(x->type());
1146   new_instance(reg, x->klass(), x->is_unresolved(),
1147                        FrameMap::r2_oop_opr,
1148                        FrameMap::r5_oop_opr,
1149                        FrameMap::r4_oop_opr,
1150                        LIR_OprFact::illegalOpr,
1151                        FrameMap::r3_metadata_opr, info);
1152   LIR_Opr result = rlock_result(x);
1153   __ move(reg, result);
1154 }
1155 
1156 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1157   CodeEmitInfo* info = state_for(x, x->state());
1158 
1159   LIRItem length(x->length(), this);
1160   length.load_item_force(FrameMap::r19_opr);
1161 
1162   LIR_Opr reg = result_register_for(x->type());
1163   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1164   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1165   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1166   LIR_Opr tmp4 = reg;
1167   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1168   LIR_Opr len = length.result();
1169   BasicType elem_type = x->elt_type();
1170 
1171   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1172 
1173   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1174   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
1175 


1181   LIRItem length(x->length(), this);
1182   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1183   // and therefore provide the state before the parameters have been consumed
1184   CodeEmitInfo* patching_info = NULL;
1185   if (!x->klass()->is_loaded() || PatchALot) {
1186     patching_info =  state_for(x, x->state_before());
1187   }
1188 
1189   CodeEmitInfo* info = state_for(x, x->state());
1190 
1191   LIR_Opr reg = result_register_for(x->type());
1192   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1193   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1194   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1195   LIR_Opr tmp4 = reg;
1196   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1197 
1198   length.load_item_force(FrameMap::r19_opr);
1199   LIR_Opr len = length.result();
1200 
1201   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);


1202   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1203   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1204     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1205   }
1206   klass2reg_with_patching(klass_reg, obj, patching_info);
1207   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

1208 
1209   LIR_Opr result = rlock_result(x);
1210   __ move(reg, result);
1211 }
1212 
1213 
1214 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1215   Values* dims = x->dims();
1216   int i = dims->length();
1217   LIRItemList* items = new LIRItemList(i, i, NULL);
1218   while (i-- > 0) {
1219     LIRItem* size = new LIRItem(dims->at(i), this);
1220     items->at_put(i, size);
1221   }
1222 
1223   // Evaluate state_for early since it may emit code.
1224   CodeEmitInfo* patching_info = NULL;
1225   if (!x->klass()->is_loaded() || PatchALot) {
1226     patching_info = state_for(x, x->state_before());
1227 


1281       (x->needs_exception_state() ? state_for(x) :
1282                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1283 
1284   CodeStub* stub;
1285   if (x->is_incompatible_class_change_check()) {
1286     assert(patching_info == NULL, "can't patch this");
1287     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1288   } else if (x->is_invokespecial_receiver_check()) {
1289     assert(patching_info == NULL, "can't patch this");
1290     stub = new DeoptimizeStub(info_for_exception,
1291                               Deoptimization::Reason_class_check,
1292                               Deoptimization::Action_none);
1293   } else {
1294     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1295   }
1296   LIR_Opr reg = rlock_result(x);
1297   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1298   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1299     tmp3 = new_register(objectType);
1300   }


1301   __ checkcast(reg, obj.result(), x->klass(),
1302                new_register(objectType), new_register(objectType), tmp3,
1303                x->direct_compare(), info_for_exception, patching_info, stub,
1304                x->profiled_method(), x->profiled_bci());

1305 }
1306 
1307 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1308   LIRItem obj(x->obj(), this);
1309 
1310   // result and test object may not be in same register
1311   LIR_Opr reg = rlock_result(x);
1312   CodeEmitInfo* patching_info = NULL;
1313   if ((!x->klass()->is_loaded() || PatchALot)) {
1314     // must do this before locking the destination register as an oop register
1315     patching_info = state_for(x, x->state_before());
1316   }
1317   obj.load_item();
1318   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1319   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1320     tmp3 = new_register(objectType);
1321   }
1322   __ instanceof(reg, obj.result(), x->klass(),
1323                 new_register(objectType), new_register(objectType), tmp3,
1324                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());




  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "c1/c1_Compilation.hpp"
  29 #include "c1/c1_FrameMap.hpp"
  30 #include "c1/c1_Instruction.hpp"
  31 #include "c1/c1_LIRAssembler.hpp"
  32 #include "c1/c1_LIRGenerator.hpp"
  33 #include "c1/c1_Runtime1.hpp"
  34 #include "c1/c1_ValueStack.hpp"
  35 #include "ci/ciArray.hpp"
  36 #include "ci/ciObjArrayKlass.hpp"
  37 #include "ci/ciTypeArrayKlass.hpp"
  38 #include "ci/ciValueKlass.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "vmreg_aarch64.inline.hpp"
  42 
  43 #ifdef ASSERT
  44 #define __ gen()->lir(__FILE__, __LINE__)->
  45 #else
  46 #define __ gen()->lir()->
  47 #endif
  48 
  49 // Item will be loaded into a byte register; Intel only
  50 void LIRItem::load_byte_item() {
  51   load_item();
  52 }
  53 
  54 
  55 void LIRItem::load_nonconstant() {
  56   LIR_Opr r = value()->operand();
  57   if (r->is_constant()) {
  58     _result = r;


 316     LIR_Opr tmp2 = new_register(objectType);
 317     LIR_Opr tmp3 = new_register(objectType);
 318     __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
 319 }
 320 
 321 //----------------------------------------------------------------------
 322 //             visitor functions
 323 //----------------------------------------------------------------------
 324 
 325 void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
 326   assert(x->is_pinned(),"");
 327   LIRItem obj(x->obj(), this);
 328   obj.load_item();
 329 
 330   set_no_result(x);
 331 
 332   // "lock" stores the address of the monitor stack slot, so this is not an oop
 333   LIR_Opr lock = new_register(T_INT);
 334   // Need a scratch register for biased locking
 335   LIR_Opr scratch = LIR_OprFact::illegalOpr;
 336   if (UseBiasedLocking || x->maybe_valuetype()) {
 337     scratch = new_register(T_INT);
 338   }
 339 
 340   CodeEmitInfo* info_for_exception = NULL;
 341   if (x->needs_null_check()) {
 342     info_for_exception = state_for(x);
 343   }
 344 
 345   CodeStub* throw_imse_stub = 
 346       x->maybe_valuetype() ?
 347       new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
 348       NULL;
 349 
 350   // this CodeEmitInfo must not have the xhandlers because here the
 351   // object is already locked (xhandlers expect object to be unlocked)
 352   CodeEmitInfo* info = state_for(x, x->state(), true);
 353   monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
 354                         x->monitor_no(), info_for_exception, info, throw_imse_stub);
 355 }
 356 
 357 
 358 void LIRGenerator::do_MonitorExit(MonitorExit* x) {
 359   assert(x->is_pinned(),"");
 360 
 361   LIRItem obj(x->obj(), this);
 362   obj.dont_load_item();
 363 
 364   LIR_Opr lock = new_register(T_INT);
 365   LIR_Opr obj_temp = new_register(T_INT);
 366   set_no_result(x);
 367   monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
 368 }
 369 
 370 
 371 void LIRGenerator::do_NegateOp(NegateOp* x) {
 372 
 373   LIRItem from(x->x(), this);
 374   from.load_item();


1143 }
1144 
1145 void LIRGenerator::do_NewInstance(NewInstance* x) {
1146 #ifndef PRODUCT
1147   if (PrintNotLoaded && !x->klass()->is_loaded()) {
1148     tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
1149   }
1150 #endif
1151   CodeEmitInfo* info = state_for(x, x->state());
1152   LIR_Opr reg = result_register_for(x->type());
1153   new_instance(reg, x->klass(), x->is_unresolved(),
1154                        FrameMap::r2_oop_opr,
1155                        FrameMap::r5_oop_opr,
1156                        FrameMap::r4_oop_opr,
1157                        LIR_OprFact::illegalOpr,
1158                        FrameMap::r3_metadata_opr, info);
1159   LIR_Opr result = rlock_result(x);
1160   __ move(reg, result);
1161 }
1162 
1163 void LIRGenerator::do_NewValueTypeInstance  (NewValueTypeInstance* x) {
1164   // Mapping to do_NewInstance (same code)
1165   CodeEmitInfo* info = state_for(x, x->state());
1166   x->set_to_object_type();
1167   LIR_Opr reg = result_register_for(x->type());
1168   new_instance(reg, x->klass(), x->is_unresolved(),
1169              FrameMap::r2_oop_opr,
1170              FrameMap::r5_oop_opr,
1171              FrameMap::r4_oop_opr,
1172              LIR_OprFact::illegalOpr,
1173              FrameMap::r3_metadata_opr, info);
1174   LIR_Opr result = rlock_result(x);
1175   __ move(reg, result);
1176 
1177 }
1178 
1179 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1180   CodeEmitInfo* info = state_for(x, x->state());
1181 
1182   LIRItem length(x->length(), this);
1183   length.load_item_force(FrameMap::r19_opr);
1184 
1185   LIR_Opr reg = result_register_for(x->type());
1186   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1187   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1188   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1189   LIR_Opr tmp4 = reg;
1190   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1191   LIR_Opr len = length.result();
1192   BasicType elem_type = x->elt_type();
1193 
1194   __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1195 
1196   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1197   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
1198 


1204   LIRItem length(x->length(), this);
1205   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
1206   // and therefore provide the state before the parameters have been consumed
1207   CodeEmitInfo* patching_info = NULL;
1208   if (!x->klass()->is_loaded() || PatchALot) {
1209     patching_info =  state_for(x, x->state_before());
1210   }
1211 
1212   CodeEmitInfo* info = state_for(x, x->state());
1213 
1214   LIR_Opr reg = result_register_for(x->type());
1215   LIR_Opr tmp1 = FrameMap::r2_oop_opr;
1216   LIR_Opr tmp2 = FrameMap::r4_oop_opr;
1217   LIR_Opr tmp3 = FrameMap::r5_oop_opr;
1218   LIR_Opr tmp4 = reg;
1219   LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
1220 
1221   length.load_item_force(FrameMap::r19_opr);
1222   LIR_Opr len = length.result();
1223 
1224   // DMS CHECK: Should we allocate slow path after BAILOUT?
1225   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, false); 
1226 
1227   ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1228   if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1229     BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1230   }
1231   klass2reg_with_patching(klass_reg, obj, patching_info);
1232 
1233   if (obj->is_value_array_klass()) {
1234     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_VALUETYPE, klass_reg, slow_path);
1235   } else {
1236     __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
1237   }
1238 
1239   LIR_Opr result = rlock_result(x);
1240   __ move(reg, result);
1241 }
1242 
1243 
1244 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1245   Values* dims = x->dims();
1246   int i = dims->length();
1247   LIRItemList* items = new LIRItemList(i, i, NULL);
1248   while (i-- > 0) {
1249     LIRItem* size = new LIRItem(dims->at(i), this);
1250     items->at_put(i, size);
1251   }
1252 
1253   // Evaluate state_for early since it may emit code.
1254   CodeEmitInfo* patching_info = NULL;
1255   if (!x->klass()->is_loaded() || PatchALot) {
1256     patching_info = state_for(x, x->state_before());
1257 


1311       (x->needs_exception_state() ? state_for(x) :
1312                                     state_for(x, x->state_before(), true /*ignore_xhandler*/));
1313 
1314   CodeStub* stub;
1315   if (x->is_incompatible_class_change_check()) {
1316     assert(patching_info == NULL, "can't patch this");
1317     stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1318   } else if (x->is_invokespecial_receiver_check()) {
1319     assert(patching_info == NULL, "can't patch this");
1320     stub = new DeoptimizeStub(info_for_exception,
1321                               Deoptimization::Reason_class_check,
1322                               Deoptimization::Action_none);
1323   } else {
1324     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1325   }
1326   LIR_Opr reg = rlock_result(x);
1327   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1328   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1329     tmp3 = new_register(objectType);
1330   }
1331 
1332 
1333   __ checkcast(reg, obj.result(), x->klass(),
1334                new_register(objectType), new_register(objectType), tmp3,
1335                x->direct_compare(), info_for_exception, patching_info, stub,
1336                x->profiled_method(), x->profiled_bci(), x->is_never_null());
1337 
1338 }
1339 
1340 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1341   LIRItem obj(x->obj(), this);
1342 
1343   // result and test object may not be in same register
1344   LIR_Opr reg = rlock_result(x);
1345   CodeEmitInfo* patching_info = NULL;
1346   if ((!x->klass()->is_loaded() || PatchALot)) {
1347     // must do this before locking the destination register as an oop register
1348     patching_info = state_for(x, x->state_before());
1349   }
1350   obj.load_item();
1351   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1352   if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
1353     tmp3 = new_register(objectType);
1354   }
1355   __ instanceof(reg, obj.result(), x->klass(),
1356                 new_register(objectType), new_register(objectType), tmp3,
1357                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());

