< prev index next >
src/hotspot/share/c1/c1_LIR.hpp
Print this page
@@ -314,10 +314,11 @@
case T_BYTE:
case T_SHORT:
case T_INT:
case T_ADDRESS:
case T_OBJECT:
+ case T_VALUETYPE:
case T_ARRAY:
case T_METADATA:
return single_size;
break;
@@ -464,10 +465,11 @@
case T_INT: return LIR_OprDesc::int_type;
case T_LONG: return LIR_OprDesc::long_type;
case T_FLOAT: return LIR_OprDesc::float_type;
case T_DOUBLE: return LIR_OprDesc::double_type;
case T_OBJECT:
+ case T_VALUETYPE:
case T_ARRAY: return LIR_OprDesc::object_type;
case T_ADDRESS: return LIR_OprDesc::address_type;
case T_METADATA: return LIR_OprDesc::metadata_type;
case T_ILLEGAL: // fall through
default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type;
@@ -649,10 +651,11 @@
static LIR_Opr virtual_register(int index, BasicType type) {
LIR_Opr res;
switch (type) {
case T_OBJECT: // fall through
+ case T_VALUETYPE: // fall through
case T_ARRAY:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::object_type |
LIR_OprDesc::cpu_register |
LIR_OprDesc::single_size |
@@ -754,10 +757,11 @@
// the index is platform independent; a double stack useing indeces 2 and 3 has always
// index 2.
static LIR_Opr stack(int index, BasicType type) {
LIR_Opr res;
switch (type) {
+ case T_VALUETYPE: // fall through
case T_OBJECT: // fall through
case T_ARRAY:
res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
LIR_OprDesc::object_type |
LIR_OprDesc::stack_value |
@@ -867,10 +871,11 @@
class LIR_OpRTCall;
class LIR_OpArrayCopy;
class LIR_OpUpdateCRC32;
class LIR_OpLock;
class LIR_OpTypeCheck;
+class LIR_OpFlattenedStoreCheck;
class LIR_OpCompareAndSwap;
class LIR_OpProfileCall;
class LIR_OpProfileType;
#ifdef ASSERT
class LIR_OpAssert;
@@ -981,10 +986,13 @@
, begin_opTypeCheck
, lir_instanceof
, lir_checkcast
, lir_store_check
, end_opTypeCheck
+ , begin_opFlattenedStoreCheck
+ , lir_flattened_store_check
+ , end_opFlattenedStoreCheck
, begin_opCompareAndSwap
, lir_cas_long
, lir_cas_obj
, lir_cas_int
, end_opCompareAndSwap
@@ -1131,10 +1139,11 @@
virtual LIR_Op2* as_Op2() { return NULL; }
virtual LIR_Op3* as_Op3() { return NULL; }
virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; }
virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; }
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
+ virtual LIR_OpFlattenedStoreCheck* as_OpFlattenedStoreCheck() { return NULL; }
virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
#ifdef ASSERT
virtual LIR_OpAssert* as_OpAssert() { return NULL; }
@@ -1262,11 +1271,14 @@
type_check = 1 << 7,
overlapping = 1 << 8,
unaligned = 1 << 9,
src_objarray = 1 << 10,
dst_objarray = 1 << 11,
- all_flags = (1 << 12) - 1
+ always_slow_path = 1 << 12,
+ src_flat_check = 1 << 13,
+ dst_flat_check = 1 << 14,
+ all_flags = (1 << 15) - 1
};
LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp,
ciArrayKlass* expected_type, int flags, CodeEmitInfo* info);
@@ -1555,15 +1567,16 @@
CodeEmitInfo* _info_for_exception;
CodeStub* _stub;
ciMethod* _profiled_method;
int _profiled_bci;
bool _should_profile;
+ bool _need_null_check;
public:
LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
- CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
+ CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub, bool need_null_check = true);
LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
LIR_Opr object() const { return _object; }
LIR_Opr array() const { assert(code() == lir_store_check, "not valid"); return _array; }
@@ -1581,17 +1594,45 @@
void set_profiled_bci(int bci) { _profiled_bci = bci; }
void set_should_profile(bool b) { _should_profile = b; }
ciMethod* profiled_method() const { return _profiled_method; }
int profiled_bci() const { return _profiled_bci; }
bool should_profile() const { return _should_profile; }
-
+ bool need_null_check() const { return _need_null_check; }
virtual bool is_patching() { return _info_for_patch != NULL; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
void print_instr(outputStream* out) const PRODUCT_RETURN;
};
+// LIR_OpFlattenedStoreCheck
+//
+// LIR operation that validates a store of _object into an element of a
+// flattened (value-type) array whose expected element klass is
+// _element_klass.  On failure, the debug state captured in
+// _info_for_exception is used to raise the exception.
+// NOTE(review): the constructor takes no CodeStub argument, so _stub is
+// presumably allocated inside the constructor in c1_LIR.cpp -- confirm
+// there; emit_code() lives in the platform LIR_Assembler.
+class LIR_OpFlattenedStoreCheck: public LIR_Op {
+  friend class LIR_OpVisitState;
+
+ private:
+  LIR_Opr _object;                     // value being stored into the array element
+  ciKlass* _element_klass;             // expected element klass of the destination array
+  LIR_Opr _tmp1;                       // scratch operand for the check
+  LIR_Opr _tmp2;                       // scratch operand for the check
+  CodeEmitInfo* _info_for_exception;   // debug info used when the check fails
+  CodeStub* _stub;                     // slow-path stub (see constructor note above)
+
+public:
+  LIR_OpFlattenedStoreCheck(LIR_Opr object, ciKlass* element_klass, LIR_Opr tmp1, LIR_Opr tmp2,
+                            CodeEmitInfo* info_for_exception);
+
+  // Accessors used by the visitor and the assembler.
+  LIR_Opr object() const { return _object; }
+  LIR_Opr tmp1() const { return _tmp1; }
+  LIR_Opr tmp2() const { return _tmp2; }
+  ciKlass* element_klass() const { return _element_klass; }
+  CodeEmitInfo* info_for_exception() const { return _info_for_exception; }
+  CodeStub* stub() const { return _stub; }
+
+  virtual void emit_code(LIR_Assembler* masm);
+  virtual LIR_OpFlattenedStoreCheck* as_OpFlattenedStoreCheck() { return this; }
+  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
+};
+
// LIR_Op2
class LIR_Op2: public LIR_Op {
friend class LIR_OpVisitState;
int _fpu_stack_size; // for sin/cos implementation on Intel
@@ -1780,24 +1821,27 @@
LIR_Opr _hdr;
LIR_Opr _obj;
LIR_Opr _lock;
LIR_Opr _scratch;
CodeStub* _stub;
+ CodeStub* _throw_imse_stub;
public:
- LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
+ LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_imse_stub=NULL)
: LIR_Op(code, LIR_OprFact::illegalOpr, info)
, _hdr(hdr)
, _obj(obj)
, _lock(lock)
, _scratch(scratch)
- , _stub(stub) {}
+ , _stub(stub)
+ , _throw_imse_stub(throw_imse_stub) {}
LIR_Opr hdr_opr() const { return _hdr; }
LIR_Opr obj_opr() const { return _obj; }
LIR_Opr lock_opr() const { return _lock; }
LIR_Opr scratch_opr() const { return _scratch; }
CodeStub* stub() const { return _stub; }
+ CodeStub* throw_imse_stub() const { return _throw_imse_stub; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpLock* as_OpLock() { return this; }
void print_instr(outputStream* out) const PRODUCT_RETURN;
};
@@ -2228,11 +2272,11 @@
append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
}
void load_stack_address_monitor(int monitor_ix, LIR_Opr dst) { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
- void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);
+ void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info, CodeStub* throw_imse_stub=NULL);
void set_24bit_fpu() { append(new LIR_Op0(lir_24bit_FPU )); }
void restore_fpu() { append(new LIR_Op0(lir_reset_FPU )); }
void breakpoint() { append(new LIR_Op0(lir_breakpoint)); }
@@ -2242,15 +2286,16 @@
void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); }
void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);
+ void flattened_store_check(LIR_Opr object, ciKlass* element_klass, LIR_Opr tmp1, LIR_Opr tmp2, CodeEmitInfo* info_for_exception);
void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
- ciMethod* profiled_method, int profiled_bci);
+ ciMethod* profiled_method, int profiled_bci, bool is_never_null);
// MethodData* profiling
void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
}
void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
< prev index next >