< prev index next >
src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
Print this page
rev 53735 : AArch64 support for ValueTypes
*** 33,42 ****
--- 33,43 ----
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
+ #include "ci/ciValueKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_aarch64.inline.hpp"
#ifdef ASSERT
*** 330,352 ****
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
// Need a scratch register for biased locking
LIR_Opr scratch = LIR_OprFact::illegalOpr;
! if (UseBiasedLocking) {
scratch = new_register(T_INT);
}
CodeEmitInfo* info_for_exception = NULL;
if (x->needs_null_check()) {
info_for_exception = state_for(x);
}
// this CodeEmitInfo must not have the xhandlers because here the
// object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
! x->monitor_no(), info_for_exception, info);
}
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
assert(x->is_pinned(),"");
--- 331,359 ----
// "lock" stores the address of the monitor stack slot, so this is not an oop
LIR_Opr lock = new_register(T_INT);
// Need a scratch register for biased locking
LIR_Opr scratch = LIR_OprFact::illegalOpr;
! if (UseBiasedLocking || x->maybe_valuetype()) {
scratch = new_register(T_INT);
}
CodeEmitInfo* info_for_exception = NULL;
if (x->needs_null_check()) {
info_for_exception = state_for(x);
}
+
+ CodeStub* throw_imse_stub =
+ x->maybe_valuetype() ?
+ new SimpleExceptionStub(Runtime1::throw_illegal_monitor_state_exception_id, LIR_OprFact::illegalOpr, state_for(x)) :
+ NULL;
+
// this CodeEmitInfo must not have the xhandlers because here the
// object is already locked (xhandlers expect object to be unlocked)
CodeEmitInfo* info = state_for(x, x->state(), true);
monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
! x->monitor_no(), info_for_exception, info, throw_imse_stub);
}
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
assert(x->is_pinned(),"");
*** 1151,1160 ****
--- 1158,1183 ----
FrameMap::r3_metadata_opr, info);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
+ void LIRGenerator::do_NewValueTypeInstance (NewValueTypeInstance* x) {
+ // Same allocation sequence as do_NewInstance: the value type is eagerly treated as an object type (see set_to_object_type below) and allocated like a regular instance.
+ CodeEmitInfo* info = state_for(x, x->state());
+ x->set_to_object_type();
+ LIR_Opr reg = result_register_for(x->type());
+ new_instance(reg, x->klass(), x->is_unresolved(),
+ FrameMap::r2_oop_opr,
+ FrameMap::r5_oop_opr,
+ FrameMap::r4_oop_opr,
+ LIR_OprFact::illegalOpr,
+ FrameMap::r3_metadata_opr, info);
+ LIR_Opr result = rlock_result(x);
+ __ move(reg, result);
+
+ }
+
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
CodeEmitInfo* info = state_for(x, x->state());
LIRItem length(x->length(), this);
length.load_item_force(FrameMap::r19_opr);
*** 1196,1212 ****
LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
length.load_item_force(FrameMap::r19_opr);
LIR_Opr len = length.result();
! CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
if (obj == ciEnv::unloaded_ciobjarrayklass()) {
BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
}
klass2reg_with_patching(klass_reg, obj, patching_info);
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
--- 1219,1242 ----
LIR_Opr klass_reg = FrameMap::r3_metadata_opr;
length.load_item_force(FrameMap::r19_opr);
LIR_Opr len = length.result();
! // DMS CHECK / TODO: consider allocating this stub only after the BAILOUT check below, so no stub is created when the compile bails out on unloaded_ciobjarrayklass.
! CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info, false);
!
ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
if (obj == ciEnv::unloaded_ciobjarrayklass()) {
BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
}
klass2reg_with_patching(klass_reg, obj, patching_info);
+
+ if (obj->is_value_array_klass()) {
+ __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_VALUETYPE, klass_reg, slow_path);
+ } else {
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
+ }
LIR_Opr result = rlock_result(x);
__ move(reg, result);
}
*** 1296,1309 ****
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
! x->profiled_method(), x->profiled_bci());
}
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
LIRItem obj(x->obj(), this);
--- 1326,1342 ----
LIR_Opr reg = rlock_result(x);
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
tmp3 = new_register(objectType);
}
+
+
__ checkcast(reg, obj.result(), x->klass(),
new_register(objectType), new_register(objectType), tmp3,
x->direct_compare(), info_for_exception, patching_info, stub,
! x->profiled_method(), x->profiled_bci(), x->is_never_null());
!
}
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
LIRItem obj(x->obj(), this);
< prev index next >