< prev index next >
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
Print this page
*** 1,7 ****
/*
! * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
--- 1,7 ----
/*
! * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*** 197,207 ****
__ push_reg(opr->as_register_lo());
} else if (opr->is_stack()) {
__ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
} else if (opr->is_constant()) {
LIR_Const* const_opr = opr->as_constant_ptr();
! if (const_opr->type() == T_OBJECT) {
__ push_oop(const_opr->as_jobject());
} else if (const_opr->type() == T_INT) {
__ push_jint(const_opr->as_jint());
} else {
ShouldNotReachHere();
--- 197,207 ----
__ push_reg(opr->as_register_lo());
} else if (opr->is_stack()) {
__ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
} else if (opr->is_constant()) {
LIR_Const* const_opr = opr->as_constant_ptr();
! if (const_opr->type() == T_OBJECT || const_opr->type() == T_VALUETYPE) {
__ push_oop(const_opr->as_jobject());
} else if (const_opr->type() == T_INT) {
__ push_jint(const_opr->as_jint());
} else {
ShouldNotReachHere();
*** 627,636 ****
--- 627,637 ----
__ movptr(dest->as_register_hi(), c->as_jint_hi());
#endif // _LP64
break;
}
+ case T_VALUETYPE: // Fall through
case T_OBJECT: {
if (patch_code != lir_patch_none) {
jobject2reg_with_patching(dest->as_register(), info);
} else {
__ movoop(dest->as_register(), c->as_jobject());
*** 709,718 ****
--- 710,720 ----
case T_ADDRESS:
__ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
break;
+ case T_VALUETYPE: // Fall through
case T_OBJECT:
__ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
break;
case T_LONG: // fall through
*** 748,757 ****
--- 750,760 ----
case T_ADDRESS:
__ movptr(as_Address(addr), c->as_jint_bits());
break;
+ case T_VALUETYPE: // fall through
case T_OBJECT: // fall through
case T_ARRAY:
if (c->as_jobject() == NULL) {
if (UseCompressedOops && !wide) {
__ movl(as_Address(addr), (int32_t)NULL_WORD);
*** 836,853 ****
move_regs(src->as_register_lo(), dest->as_register());
return;
}
#endif
assert(src->is_single_cpu(), "must match");
! if (src->type() == T_OBJECT) {
__ verify_oop(src->as_register());
}
move_regs(src->as_register(), dest->as_register());
} else if (dest->is_double_cpu()) {
#ifdef _LP64
! if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
// Surprising to me but we can see move of a long to t_object
__ verify_oop(src->as_register());
move_regs(src->as_register(), dest->as_register_lo());
return;
}
--- 839,856 ----
move_regs(src->as_register_lo(), dest->as_register());
return;
}
#endif
assert(src->is_single_cpu(), "must match");
! if (src->type() == T_OBJECT || src->type() == T_VALUETYPE) {
__ verify_oop(src->as_register());
}
move_regs(src->as_register(), dest->as_register());
} else if (dest->is_double_cpu()) {
#ifdef _LP64
! if (src->type() == T_OBJECT || src->type() == T_ARRAY || src->type() == T_VALUETYPE) {
// Surprising to me but we can see move of a long to t_object
__ verify_oop(src->as_register());
move_regs(src->as_register(), dest->as_register_lo());
return;
}
*** 914,924 ****
assert(src->is_register(), "should not call otherwise");
assert(dest->is_stack(), "should not call otherwise");
if (src->is_single_cpu()) {
Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
! if (type == T_OBJECT || type == T_ARRAY) {
__ verify_oop(src->as_register());
__ movptr (dst, src->as_register());
} else if (type == T_METADATA) {
__ movptr (dst, src->as_register());
} else {
--- 917,927 ----
assert(src->is_register(), "should not call otherwise");
assert(dest->is_stack(), "should not call otherwise");
if (src->is_single_cpu()) {
Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
! if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
__ verify_oop(src->as_register());
__ movptr (dst, src->as_register());
} else if (type == T_METADATA) {
__ movptr (dst, src->as_register());
} else {
*** 960,970 ****
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
LIR_Address* to_addr = dest->as_address_ptr();
PatchingStub* patch = NULL;
Register compressed_src = rscratch1;
! if (type == T_ARRAY || type == T_OBJECT) {
__ verify_oop(src->as_register());
#ifdef _LP64
if (UseCompressedOops && !wide) {
__ movptr(compressed_src, src->as_register());
__ encode_heap_oop(compressed_src);
--- 963,973 ----
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
LIR_Address* to_addr = dest->as_address_ptr();
PatchingStub* patch = NULL;
Register compressed_src = rscratch1;
! if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
__ verify_oop(src->as_register());
#ifdef _LP64
if (UseCompressedOops && !wide) {
__ movptr(compressed_src, src->as_register());
__ encode_heap_oop(compressed_src);
*** 1005,1014 ****
--- 1008,1018 ----
else __ fst_d (as_Address(to_addr));
}
break;
}
+ case T_VALUETYPE: // fall through
case T_ARRAY: // fall through
case T_OBJECT: // fall through
if (UseCompressedOops && !wide) {
__ movl(as_Address(to_addr), compressed_src);
} else {
*** 1095,1105 ****
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
assert(src->is_stack(), "should not call otherwise");
assert(dest->is_register(), "should not call otherwise");
if (dest->is_single_cpu()) {
! if (type == T_ARRAY || type == T_OBJECT) {
__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
__ verify_oop(dest->as_register());
} else if (type == T_METADATA) {
__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
} else {
--- 1099,1109 ----
void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
assert(src->is_stack(), "should not call otherwise");
assert(dest->is_register(), "should not call otherwise");
if (dest->is_single_cpu()) {
! if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
__ verify_oop(dest->as_register());
} else if (type == T_METADATA) {
__ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
} else {
*** 1136,1146 ****
}
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
if (src->is_single_stack()) {
! if (type == T_OBJECT || type == T_ARRAY) {
__ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
__ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
} else {
#ifndef _LP64
__ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
--- 1140,1150 ----
}
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
if (src->is_single_stack()) {
! if (type == T_OBJECT || type == T_ARRAY || type == T_VALUETYPE) {
__ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
__ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
} else {
#ifndef _LP64
__ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
*** 1175,1185 ****
assert(dest->is_register(), "should not call otherwise");
LIR_Address* addr = src->as_address_ptr();
Address from_addr = as_Address(addr);
! if (addr->base()->type() == T_OBJECT) {
__ verify_oop(addr->base()->as_pointer_register());
}
switch (type) {
case T_BOOLEAN: // fall through
--- 1179,1189 ----
assert(dest->is_register(), "should not call otherwise");
LIR_Address* addr = src->as_address_ptr();
Address from_addr = as_Address(addr);
! if (addr->base()->type() == T_OBJECT || addr->base()->type() == T_VALUETYPE) {
__ verify_oop(addr->base()->as_pointer_register());
}
switch (type) {
case T_BOOLEAN: // fall through
*** 1228,1237 ****
--- 1232,1242 ----
__ fld_d(from_addr);
}
break;
}
+ case T_VALUETYPE: // fall through
case T_OBJECT: // fall through
case T_ARRAY: // fall through
if (UseCompressedOops && !wide) {
__ movl(dest->as_register(), from_addr);
} else {
*** 1337,1347 ****
if (patch != NULL) {
patching_epilog(patch, patch_code, addr->base()->as_register(), info);
}
! if (type == T_ARRAY || type == T_OBJECT) {
#ifdef _LP64
if (UseCompressedOops && !wide) {
__ decode_heap_oop(dest->as_register());
}
#endif
--- 1342,1352 ----
if (patch != NULL) {
patching_epilog(patch, patch_code, addr->base()->as_register(), info);
}
! if (type == T_ARRAY || type == T_OBJECT || type == T_VALUETYPE) {
#ifdef _LP64
if (UseCompressedOops && !wide) {
__ decode_heap_oop(dest->as_register());
}
#endif
*** 1574,1584 ****
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
Register len = op->len()->as_register();
LP64_ONLY( __ movslq(len, len); )
! if (UseSlowPath ||
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
__ jmp(*op->stub()->entry());
} else {
Register tmp1 = op->tmp1()->as_register();
--- 1579,1589 ----
void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
Register len = op->len()->as_register();
LP64_ONLY( __ movslq(len, len); )
! if (UseSlowPath || op->type() == T_VALUETYPE ||
(!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
(!UseFastNewTypeArray && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
__ jmp(*op->stub()->entry());
} else {
Register tmp1 = op->tmp1()->as_register();
*** 1672,1681 ****
--- 1677,1687 ----
select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
}
assert_different_registers(obj, k_RInfo, klass_RInfo);
+ if (op->need_null_check()) {
__ cmpptr(obj, (int32_t)NULL_WORD);
if (op->should_profile()) {
Label not_null;
__ jccb(Assembler::notEqual, not_null);
// Object is null; update MDO and exit
*** 1687,1696 ****
--- 1693,1703 ----
__ jmp(*obj_is_null);
__ bind(not_null);
} else {
__ jcc(Assembler::equal, *obj_is_null);
}
+ }
if (!k->is_loaded()) {
klass2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
#ifdef _LP64
*** 1896,1905 ****
--- 1903,1932 ----
ShouldNotReachHere();
}
}
+ void LIR_Assembler::emit_opFlattenedStoreCheck(LIR_OpFlattenedStoreCheck* op) {
+ Klass* k = (Klass*)(op->element_klass()->constant_encoding());
+ assert(k->is_klass(), "must be a loaded klass");
+ add_debug_info_for_null_check_here(op->info_for_exception());
+
+ #ifdef _LP64
+ if (UseCompressedClassPointers) {
+ __ movl(op->tmp1()->as_register(), Address(op->object()->as_register(), oopDesc::klass_offset_in_bytes()));
+ __ cmp_narrow_klass(op->tmp1()->as_register(), k);
+ } else {
+ __ movq(op->tmp1()->as_register(), Address(op->object()->as_register(), oopDesc::klass_offset_in_bytes()));
+ __ cmpq(op->tmp1()->as_register(), op->tmp2()->as_register());
+ }
+ #else
+ Unimplemented(); // FIXME
+ #endif
+
+ __ jcc(Assembler::notEqual, *op->stub()->entry());
+ __ bind(*op->stub()->continuation());
+ }
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
*** 2492,2502 ****
}
#endif // _LP64
} else {
#ifdef _LP64
Register r_lo;
! if (right->type() == T_OBJECT || right->type() == T_ARRAY) {
r_lo = right->as_register();
} else {
r_lo = right->as_register_lo();
}
#else
--- 2519,2529 ----
}
#endif // _LP64
} else {
#ifdef _LP64
Register r_lo;
! if (right->type() == T_OBJECT || right->type() == T_ARRAY || right->type() == T_VALUETYPE) {
r_lo = right->as_register();
} else {
r_lo = right->as_register_lo();
}
#else
*** 2605,2633 ****
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
if (opr1->is_single_cpu()) {
Register reg1 = opr1->as_register();
if (opr2->is_single_cpu()) {
// cpu register - cpu register
! if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
__ cmpoop(reg1, opr2->as_register());
} else {
! assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
__ cmpl(reg1, opr2->as_register());
}
} else if (opr2->is_stack()) {
// cpu register - stack
! if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
__ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
} else {
__ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
}
} else if (opr2->is_constant()) {
// cpu register - constant
LIR_Const* c = opr2->as_constant_ptr();
if (c->type() == T_INT) {
__ cmpl(reg1, c->as_jint());
! } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
// In 64bit oops are single register
jobject o = c->as_jobject();
if (o == NULL) {
__ cmpptr(reg1, (int32_t)NULL_WORD);
} else {
--- 2632,2660 ----
void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
if (opr1->is_single_cpu()) {
Register reg1 = opr1->as_register();
if (opr2->is_single_cpu()) {
// cpu register - cpu register
! if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
__ cmpoop(reg1, opr2->as_register());
} else {
! assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY && opr2->type() != T_VALUETYPE, "cmp int, oop?");
__ cmpl(reg1, opr2->as_register());
}
} else if (opr2->is_stack()) {
// cpu register - stack
! if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY || opr1->type() == T_VALUETYPE) {
__ cmpoop(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
} else {
__ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
}
} else if (opr2->is_constant()) {
// cpu register - constant
LIR_Const* c = opr2->as_constant_ptr();
if (c->type() == T_INT) {
__ cmpl(reg1, c->as_jint());
! } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
// In 64bit oops are single register
jobject o = c->as_jobject();
if (o == NULL) {
__ cmpptr(reg1, (int32_t)NULL_WORD);
} else {
*** 2723,2733 ****
__ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
} else if (opr1->is_address() && opr2->is_constant()) {
LIR_Const* c = opr2->as_constant_ptr();
#ifdef _LP64
! if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
__ movoop(rscratch1, c->as_jobject());
}
#endif // LP64
if (op->info() != NULL) {
--- 2750,2760 ----
__ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
} else if (opr1->is_address() && opr2->is_constant()) {
LIR_Const* c = opr2->as_constant_ptr();
#ifdef _LP64
! if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
__ movoop(rscratch1, c->as_jobject());
}
#endif // LP64
if (op->info() != NULL) {
*** 2735,2745 ****
}
// special case: address - constant
LIR_Address* addr = opr1->as_address_ptr();
if (c->type() == T_INT) {
__ cmpl(as_Address(addr), c->as_jint());
! } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
#ifdef _LP64
// %%% Make this explode if addr isn't reachable until we figure out a
// better strategy by giving noreg as the temp for as_Address
__ cmpoop(rscratch1, as_Address(addr, noreg));
#else
--- 2762,2772 ----
}
// special case: address - constant
LIR_Address* addr = opr1->as_address_ptr();
if (c->type() == T_INT) {
__ cmpl(as_Address(addr), c->as_jint());
! } else if (c->type() == T_OBJECT || c->type() == T_ARRAY || c->type() == T_VALUETYPE) {
#ifdef _LP64
// %%% Make this explode if addr isn't reachable until we figure out a
// better strategy by giving noreg as the temp for as_Address
__ cmpoop(rscratch1, as_Address(addr, noreg));
#else
*** 3016,3025 ****
--- 3043,3067 ----
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
__ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
}
+ void LIR_Assembler::arraycopy_flat_check(Register obj, Register tmp, CodeStub* slow_path) {
+ Address klass_addr = Address(obj, oopDesc::klass_offset_in_bytes());
+ if (UseCompressedClassPointers) {
+ __ movl(tmp, klass_addr);
+ LP64_ONLY(__ decode_klass_not_null(tmp));
+ } else {
+ __ movptr(tmp, klass_addr);
+ }
+ __ movl(tmp, Address(tmp, Klass::layout_helper_offset()));
+ __ sarl(tmp, Klass::_lh_array_tag_shift);
+ __ cmpl(tmp, Klass::_lh_array_tag_vt_value);
+ __ jcc(Assembler::equal, *slow_path->entry());
+ }
+
+
// This code replaces a call to arraycopy; no exception may
// be thrown in this code, they must be thrown in the System.arraycopy
// activation frame; we could save some checks if this would not be the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
ciArrayKlass* default_type = op->expected_type();
*** 3036,3045 ****
--- 3078,3111 ----
CodeStub* stub = op->stub();
int flags = op->flags();
BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
if (basic_type == T_ARRAY) basic_type = T_OBJECT;
+ if (flags & LIR_OpArrayCopy::always_slow_path) {
+ __ jmp(*stub->entry());
+ __ bind(*stub->continuation());
+ return;
+ }
+
+ if (flags & LIR_OpArrayCopy::src_flat_check) {
+ arraycopy_flat_check(src, tmp, stub);
+ }
+
+ if (flags & LIR_OpArrayCopy::dst_flat_check) {
+ arraycopy_flat_check(dst, tmp, stub);
+ }
+
+ if (basic_type == T_VALUETYPE) {
+ assert(flags & (LIR_OpArrayCopy::always_slow_path |
+ LIR_OpArrayCopy::src_flat_check |
+ LIR_OpArrayCopy::dst_flat_check), "must have checked");
+ // If either src or dst is (or might be) a flattened array, one of the 3 checks
+ // above would have caught it and taken the slow path. So when we get here,
+ // the array must be a (non-flat) object array.
+ basic_type = T_OBJECT;
+ }
+
// if we don't know anything, just go through the generic arraycopy
if (default_type == NULL) {
// save outgoing arguments on stack in case call to System.arraycopy is needed
// HACK ALERT. This code used to push the parameters in a hardwired fashion
// for interpreter calling conventions. Now we have to do it in new style conventions.
< prev index next >