src/hotspot/share/opto/library_call.cpp
@@ -50,10 +50,11 @@
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
+#include "opto/valuetypenode.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/unsafe.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
@@ -161,11 +162,10 @@
Node* array_length,
RegionNode* region);
void generate_string_range_check(Node* array, Node* offset,
Node* length, bool char_count);
Node* generate_current_thread(Node* &tls_output);
- Node* load_mirror_from_klass(Node* klass);
Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
RegionNode* region, int null_path,
int offset);
Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
RegionNode* region, int null_path) {
@@ -183,24 +183,40 @@
}
Node* generate_access_flags_guard(Node* kls,
int modifier_mask, int modifier_bits,
RegionNode* region);
Node* generate_interface_guard(Node* kls, RegionNode* region);
+ Node* generate_value_guard(Node* kls, RegionNode* region);
+
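+  // Array-kind queries used by generate_array_guard_common() to pick the
+  // layout-helper test to emit.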
+ enum ArrayKind {
+ AnyArray,
+ NonArray,
+ ObjectArray,
+ NonObjectArray,
+ TypeArray,
+ ValueArray
+ };
+
Node* generate_array_guard(Node* kls, RegionNode* region) {
- return generate_array_guard_common(kls, region, false, false);
+ return generate_array_guard_common(kls, region, AnyArray);
}
Node* generate_non_array_guard(Node* kls, RegionNode* region) {
- return generate_array_guard_common(kls, region, false, true);
+ return generate_array_guard_common(kls, region, NonArray);
}
Node* generate_objArray_guard(Node* kls, RegionNode* region) {
- return generate_array_guard_common(kls, region, true, false);
+ return generate_array_guard_common(kls, region, ObjectArray);
}
Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
- return generate_array_guard_common(kls, region, true, true);
+ return generate_array_guard_common(kls, region, NonObjectArray);
+ }
+ Node* generate_typeArray_guard(Node* kls, RegionNode* region) {
+ return generate_array_guard_common(kls, region, TypeArray);
}
- Node* generate_array_guard_common(Node* kls, RegionNode* region,
- bool obj_array, bool not_array);
+ Node* generate_valueArray_guard(Node* kls, RegionNode* region) {
+ return generate_array_guard_common(kls, region, ValueArray);
+ }
+ Node* generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind);
Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region);
CallJavaNode* generate_method_call(vmIntrinsics::ID method_id,
bool is_virtual = false, bool is_static = false);
CallJavaNode* generate_method_call_static(vmIntrinsics::ID method_id) {
return generate_method_call(method_id, false, true);
@@ -251,10 +267,12 @@
bool inline_unsafe_access(bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
static bool klass_needs_init_guard(Node* kls);
bool inline_unsafe_allocate();
bool inline_unsafe_newArray(bool uninitialized);
bool inline_unsafe_copyMemory();
+ bool inline_unsafe_make_private_buffer();
+ bool inline_unsafe_finish_private_buffer();
bool inline_native_currentThread();
bool inline_native_time_funcs(address method, const char* funcName);
#ifdef JFR_HAVE_INTRINSICS
bool inline_native_classID();
@@ -587,29 +605,33 @@
case vmIntrinsics::_compressStringC:
case vmIntrinsics::_compressStringB: return inline_string_copy( is_compress);
case vmIntrinsics::_inflateStringC:
case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress);
+ case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer();
+ case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer();
case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false);
case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false);
case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false);
case vmIntrinsics::_getShort: return inline_unsafe_access(!is_store, T_SHORT, Relaxed, false);
case vmIntrinsics::_getChar: return inline_unsafe_access(!is_store, T_CHAR, Relaxed, false);
case vmIntrinsics::_getInt: return inline_unsafe_access(!is_store, T_INT, Relaxed, false);
case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false);
case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false);
case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false);
+ case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_VALUETYPE, Relaxed, false);
case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false);
case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false);
case vmIntrinsics::_putByte: return inline_unsafe_access( is_store, T_BYTE, Relaxed, false);
case vmIntrinsics::_putShort: return inline_unsafe_access( is_store, T_SHORT, Relaxed, false);
case vmIntrinsics::_putChar: return inline_unsafe_access( is_store, T_CHAR, Relaxed, false);
case vmIntrinsics::_putInt: return inline_unsafe_access( is_store, T_INT, Relaxed, false);
case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false);
case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false);
case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false);
+ case vmIntrinsics::_putValue: return inline_unsafe_access( is_store, T_VALUETYPE, Relaxed, false);
case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false);
case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false);
case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_store, T_BYTE, Volatile, false);
case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_store, T_SHORT, Volatile, false);
@@ -2358,22 +2380,22 @@
ciSignature* sig = callee()->signature();
#ifdef ASSERT
if (!is_store) {
// Object getReference(Object base, int/long offset), etc.
BasicType rtype = sig->return_type()->basic_type();
- assert(rtype == type, "getter must return the expected value");
- assert(sig->count() == 2, "oop getter has 2 arguments");
+ assert(rtype == type || (rtype == T_OBJECT && type == T_VALUETYPE), "getter must return the expected value");
+ assert(sig->count() == 2 || (type == T_VALUETYPE && sig->count() == 3), "oop getter has 2 or 3 arguments");
assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object");
assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct");
} else {
// void putReference(Object base, int/long offset, Object x), etc.
assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value");
- assert(sig->count() == 3, "oop putter has 3 arguments");
+ assert(sig->count() == 3 || (type == T_VALUETYPE && sig->count() == 4), "oop putter has 3 or 4 arguments");
assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object");
assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct");
BasicType vtype = sig->type_at(sig->count()-1)->basic_type();
- assert(vtype == type, "putter must accept the expected value");
+ assert(vtype == type || (type == T_VALUETYPE && vtype == T_OBJECT), "putter must accept the expected value");
}
#endif // ASSERT
}
#endif //PRODUCT
@@ -2394,28 +2416,88 @@
// We currently rely on the cookies produced by Unsafe.xxxFieldOffset
// to be plain byte offsets, which are also the same as those accepted
// by oopDesc::field_addr.
assert(Unsafe_field_offset_to_byte_offset(11) == 11,
"fieldOffset must be byte-scaled");
+
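+ // For value type accesses (get/putValue), an extra Class argument names
+ // the value class. The access can only be intrinsified if that class is
+ // statically known to be an exact value klass.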
+ ciValueKlass* value_klass = NULL;
+ if (type == T_VALUETYPE) {
+ Node* cls = null_check(argument(4));
+ if (stopped()) {
+ return true;
+ }
+ Node* kls = load_klass_from_mirror(cls, false, NULL, 0);
+ const TypeKlassPtr* kls_t = _gvn.type(kls)->isa_klassptr();
+ if (!kls_t->klass_is_exact()) {
+ return false;
+ }
+ ciKlass* klass = kls_t->klass();
+ if (!klass->is_valuetype()) {
+ return false;
+ }
+ value_klass = klass->as_value_klass();
+ }
+
+ receiver = null_check(receiver);
+ if (stopped()) {
+ return true;
+ }
+
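+ // Handle a base that is itself a (possibly scalarized) value type:
+ // stores require an allocated larval buffer, while a load at a constant
+ // offset may fold directly to the corresponding field value.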
+ if (base->is_ValueType()) {
+ ValueTypeNode* vt = base->as_ValueType();
+
+ if (is_store) {
+ if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
+ return false;
+ }
+ base = vt->get_oop();
+ } else {
+ if (offset->is_Con()) {
+ long off = find_long_con(offset, 0);
+ ciValueKlass* vk = _gvn.type(vt)->is_valuetype()->value_klass();
+ if ((long)(int)off != off || !vk->contains_field_offset(off)) {
+ return false;
+ }
+
+ ciField* f = vk->get_non_flattened_field_by_offset((int)off);
+
+ if (f != NULL) {
+ BasicType bt = f->layout_type();
+ if (bt == T_ARRAY || bt == T_NARROWOOP) {
+ bt = T_OBJECT;
+ }
+ if (bt == type) {
+ if (bt != T_VALUETYPE || f->type() == value_klass) {
+ set_result(vt->field_value_by_offset((int)off, false));
+ return true;
+ }
+ }
+ }
+ }
+ vt = vt->allocate(this)->as_ValueType();
+ base = vt->get_oop();
+ }
+ }
+
// 32-bit machines ignore the high half!
offset = ConvL2X(offset);
adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed);
if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
heap_base_oop = base;
- } else if (type == T_OBJECT) {
+ } else if (type == T_OBJECT || (value_klass != NULL && value_klass->has_object_fields())) {
return false; // off-heap oop accesses are not supported
}
// Can base be NULL? Otherwise, always on-heap access.
bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
if (!can_access_non_heap) {
decorators |= IN_HEAP;
}
- val = is_store ? argument(4) : NULL;
+ val = is_store ? argument(4 + (type == T_VALUETYPE ? 1 : 0)) : NULL;
const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
// Try to categorize the address.
Compile::AliasType* alias_type = C->alias_type(adr_type);
@@ -2425,11 +2507,35 @@
alias_type->adr_type() == TypeAryPtr::RANGE) {
return false; // not supported
}
bool mismatched = false;
- BasicType bt = alias_type->basic_type();
+ BasicType bt = T_ILLEGAL;
+ ciField* field = NULL;
+ if (adr_type->isa_instptr()) {
+ const TypeInstPtr* instptr = adr_type->is_instptr();
+ ciInstanceKlass* k = instptr->klass()->as_instance_klass();
+ int off = instptr->offset();
+ if (instptr->const_oop() != NULL &&
+ instptr->klass() == ciEnv::current()->Class_klass() &&
+ instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) {
+ k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
+ field = k->get_field_by_offset(off, true);
+ } else {
+ field = k->get_non_flattened_field_by_offset(off);
+ }
+ if (field != NULL) {
+ bt = field->layout_type();
+ }
+ assert(bt == alias_type->basic_type() || bt == T_VALUETYPE, "should match");
+ if (field != NULL && bt == T_VALUETYPE && !field->is_flattened()) {
+ bt = T_OBJECT;
+ }
+ } else {
+ bt = alias_type->basic_type();
+ }
+
if (bt != T_ILLEGAL) {
assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
if (bt == T_BYTE && adr_type->isa_aryptr()) {
// Alias type doesn't differentiate between byte[] and boolean[]).
// Use address type to get the element type.
@@ -2446,10 +2552,32 @@
mismatched = (bt != type);
} else if (alias_type->adr_type()->isa_oopptr()) {
mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched
}
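+ // Extra checks for value type accesses: the destination must hold a
+ // value of the expected value klass or the access is mismatched, and a
+ // store requires the value operand to have exactly that klass.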
+ if (type == T_VALUETYPE) {
+ if (adr_type->isa_instptr()) {
+ if (field == NULL || field->type() != value_klass) {
+ mismatched = true;
+ }
+ } else if (adr_type->isa_aryptr()) {
+ const Type* elem = adr_type->is_aryptr()->elem();
+ if (!elem->isa_valuetype()) {
+ mismatched = true;
+ } else if (elem->is_valuetype()->value_klass() != value_klass) {
+ mismatched = true;
+ }
+ }
+ if (is_store) {
+ const Type* val_t = _gvn.type(val);
+ if (!val_t->isa_valuetype() ||
+ val_t->is_valuetype()->value_klass() != value_klass) {
+ return false;
+ }
+ }
+ }
+
assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
if (mismatched) {
decorators |= C2_MISMATCHED;
}
@@ -2458,37 +2586,47 @@
const Type *value_type = Type::get_const_basic_type(type);
// Figure out the memory ordering.
decorators |= mo_decorator_for_access_kind(kind);
- if (!is_store && type == T_OBJECT) {
+ if (!is_store) {
+ if (type == T_OBJECT) {
const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
if (tjp != NULL) {
value_type = tjp;
}
+ } else if (type == T_VALUETYPE) {
+ value_type = NULL;
}
-
- receiver = null_check(receiver);
- if (stopped()) {
- return true;
}
+
// Heap pointers get a null-check from the interpreter,
// as a courtesy. However, this is not guaranteed by Unsafe,
// and it is not possible to fully distinguish unintended nulls
// from intended ones in this API.
if (!is_store) {
Node* p = NULL;
// Try to constant fold a load from a constant field
- ciField* field = alias_type->field();
+
if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) {
// final or stable field
p = make_constant_from_field(field, heap_base_oop);
}
if (p == NULL) { // Could not constant fold the load
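+       // Load the value type from its flattened representation; a matched
+       // instance field access passes the known holder and offset.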
+ if (type == T_VALUETYPE) {
+ if (adr_type->isa_instptr() && !mismatched) {
+ ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
+ int offset = adr_type->is_instptr()->offset();
+ p = ValueTypeNode::make_from_flattened(this, value_klass, base, base, holder, offset, decorators);
+ } else {
+ p = ValueTypeNode::make_from_flattened(this, value_klass, base, adr, NULL, 0, decorators);
+ }
+ } else {
p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators);
+ }
// Normalize the value returned by getBoolean in the following cases
if (type == T_BOOLEAN &&
(mismatched ||
heap_base_oop == top() || // - heap_base_oop is NULL or
(can_access_non_heap && field == NULL)) // - heap_base_oop is potentially NULL
@@ -2511,10 +2649,18 @@
}
if (type == T_ADDRESS) {
p = gvn().transform(new CastP2XNode(NULL, p));
p = ConvX2UL(p);
}
+ if (field != NULL && field->is_flattenable() && !field->is_flattened()) {
+ // Load a non-flattened but flattenable value type from memory
+ if (value_type->value_klass()->is_scalarizable()) {
+ p = ValueTypeNode::make_from_oop(this, p, value_type->value_klass());
+ } else {
+ p = null2default(p, value_type->value_klass());
+ }
+ }
// The load node has the control of the preceding MemBarCPUOrder. All
// following nodes will have the control of the MemBarCPUOrder inserted at
// the end of this method. So, pushing the load onto the stack at a later
// point is fine.
set_result(p);
@@ -2522,12 +2668,69 @@
if (bt == T_ADDRESS) {
// Repackage the long as a pointer.
val = ConvL2X(val);
val = gvn().transform(new CastX2PNode(val));
}
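+     // Store a value type to its flattened representation; a matched
+     // instance field access passes the known holder and offset.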
+ if (type == T_VALUETYPE) {
+ if (adr_type->isa_instptr() && !mismatched) {
+ ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass();
+ int offset = adr_type->is_instptr()->offset();
+ val->as_ValueType()->store_flattened(this, base, base, holder, offset, decorators);
+ } else {
+ val->as_ValueType()->store_flattened(this, base, adr, NULL, 0, decorators);
+ }
+ } else {
access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators);
}
+ }
+
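+ // If the base was a buffered value type, the store changed its field
+ // values: rebuild the value type node from the buffer and update the
+ // map so later uses see the new (still larval) value.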
+ if (argument(1)->is_ValueType() && is_store) {
+ Node* value = ValueTypeNode::make_from_oop(this, base, _gvn.type(base)->value_klass());
+ value = value->as_ValueType()->make_larval(this, false);
+ replace_in_map(argument(1), value);
+ }
+
+ return true;
+}
+
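+// Support for Unsafe.makePrivateBuffer: returns a larval (mutable) copy of
+// the given value type that subsequent Unsafe.put* calls may modify.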
+bool LibraryCallKit::inline_unsafe_make_private_buffer() {
+ Node* receiver = argument(0);
+ Node* value = argument(1);
+
+ receiver = null_check(receiver);
+ if (stopped()) {
+ return true;
+ }
+
+ if (!value->is_ValueType()) {
+ return false;
+ }
+
+ set_result(value->as_ValueType()->make_larval(this, true));
+
+ return true;
+}
+
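+// Support for Unsafe.finishPrivateBuffer: turns a larval buffer back into
+// an immutable value. The buffer must be allocated and still be larval.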
+bool LibraryCallKit::inline_unsafe_finish_private_buffer() {
+ Node* receiver = argument(0);
+ Node* buffer = argument(1);
+
+ receiver = null_check(receiver);
+ if (stopped()) {
+ return true;
+ }
+
+ if (!buffer->is_ValueType()) {
+ return false;
+ }
+
+ ValueTypeNode* vt = buffer->as_ValueType();
+ if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) {
+ return false;
+ }
+
+ set_result(vt->finish_larval(this));
return true;
}
//----------------------------inline_unsafe_load_store----------------------------
@@ -3060,19 +3263,10 @@
C->set_has_split_ifs(true); // Has chance for split-if optimization
set_result(result_rgn, result_val);
return true;
}
-//---------------------------load_mirror_from_klass----------------------------
-// Given a klass oop, load its java mirror (a java.lang.Class oop).
-Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
- Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
- Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
- // mirror = ((OopHandle)mirror)->resolve();
- return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
-}
-
//-----------------------load_klass_from_mirror_common-------------------------
// Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
// Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
// and branch to the given path on the region.
// If never_see_null, take an uncommon trap on null, so we can optimistically
@@ -3115,10 +3309,14 @@
}
Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
}
+Node* LibraryCallKit::generate_value_guard(Node* kls, RegionNode* region) {
+ return generate_access_flags_guard(kls, JVM_ACC_VALUE, 0, region);
+}
+
//-------------------------inline_native_Class_query-------------------
bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
const Type* return_type = TypeInt::BOOL;
Node* prim_return_value = top(); // what happens if it's a primitive class?
bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
@@ -3299,22 +3497,32 @@
return false; // dead path (mirror->is_top()).
}
if (obj == NULL || obj->is_top()) {
return false; // dead path
}
+
+ ciKlass* obj_klass = NULL;
+ if (obj->is_ValueType()) {
+ const TypeValueType* tvt = _gvn.type(obj)->is_valuetype();
+ obj_klass = tvt->value_klass();
+ } else {
const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr();
+ if (tp != NULL) {
+ obj_klass = tp->klass();
+ }
+ }
// First, see if Class.cast() can be folded statically.
// java_mirror_type() returns non-null for compile-time Class constants.
ciType* tm = mirror_con->java_mirror_type();
if (tm != NULL && tm->is_klass() &&
- tp != NULL && tp->klass() != NULL) {
- if (!tp->klass()->is_loaded()) {
+ obj_klass != NULL) {
+ if (!obj_klass->is_loaded()) {
// Don't use intrinsic when class is not loaded.
return false;
} else {
- int static_res = C->static_subtype_check(tm->as_klass(), tp->klass());
+ int static_res = C->static_subtype_check(tm->as_klass(), obj_klass);
if (static_res == Compile::SSC_always_true) {
// isInstance() is true - fold the code.
set_result(obj);
return true;
} else if (static_res == Compile::SSC_always_false) {
@@ -3478,59 +3686,78 @@
set_result(_gvn.transform(phi));
return true;
}
//---------------------generate_array_guard_common------------------------
-Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region,
- bool obj_array, bool not_array) {
+Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) {
if (stopped()) {
return NULL;
}
- // If obj_array/non_array==false/false:
- // Branch around if the given klass is in fact an array (either obj or prim).
- // If obj_array/non_array==false/true:
- // Branch around if the given klass is not an array klass of any kind.
- // If obj_array/non_array==true/true:
- // Branch around if the kls is not an oop array (kls is int[], String, etc.)
- // If obj_array/non_array==true/false:
- // Branch around if the kls is an oop array (Object[] or subtype)
- //
// Like generate_guard, adds a new path onto the region.
jint layout_con = 0;
Node* layout_val = get_layout_helper(kls, layout_con);
if (layout_val == NULL) {
- bool query = (obj_array
- ? Klass::layout_helper_is_objArray(layout_con)
- : Klass::layout_helper_is_array(layout_con));
- if (query == not_array) {
+ bool query = false;
+ switch (kind) {
+ case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break;
+ case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break;
+ case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break;
+ case ValueArray: query = Klass::layout_helper_is_valueArray(layout_con); break;
+ case AnyArray: query = Klass::layout_helper_is_array(layout_con); break;
+ case NonArray: query = !Klass::layout_helper_is_array(layout_con); break;
+ default:
+ ShouldNotReachHere();
+ }
+ if (!query) {
return NULL; // never a branch
} else { // always a branch
Node* always_branch = control();
if (region != NULL)
region->add_req(always_branch);
set_control(top());
return always_branch;
}
}
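+ // Select the constant and comparison for the requested kind: the array
+ // tag is extracted by shifting and compared for (in)equality, while the
+ // (non-)array tests compare the raw layout helper against the neutral
+ // value.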
+ unsigned int value = 0;
+ BoolTest::mask btest = BoolTest::illegal;
+ switch (kind) {
+ case ObjectArray:
+ case NonObjectArray: {
+ value = Klass::_lh_array_tag_obj_value;
+ layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+ btest = kind == ObjectArray ? BoolTest::eq : BoolTest::ne;
+ break;
+ }
+ case TypeArray: {
+ value = Klass::_lh_array_tag_type_value;
+ layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+ btest = BoolTest::eq;
+ break;
+ }
+ case ValueArray: {
+ value = Klass::_lh_array_tag_vt_value;
+ layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+ btest = BoolTest::eq;
+ break;
+ }
+ case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break;
+ case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break;
+ default:
+ ShouldNotReachHere();
+ }
// Now test the correct condition.
- jint nval = (obj_array
- ? (jint)(Klass::_lh_array_tag_type_value
- << Klass::_lh_array_tag_shift)
- : Klass::_lh_neutral_value);
+ jint nval = (jint)value;
Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval)));
- BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array
- // invert the test if we are looking for a non-array
- if (not_array) btest = BoolTest(btest).negate();
Node* bol = _gvn.transform(new BoolNode(cmp, btest));
return generate_fair_guard(bol, region);
}
//-----------------------inline_native_newArray--------------------------
-// private static native Object java.lang.reflect.newArray(Class<?> componentType, int length);
+// private static native Object java.lang.reflect.Array.newArray(Class<?> componentType, int length);
// private native Object Unsafe.allocateUninitializedArray0(Class<?> cls, int size);
bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) {
Node* mirror;
Node* count_val;
if (uninitialized) {
@@ -3642,10 +3869,23 @@
Node* original = argument(0);
Node* start = is_copyOfRange? argument(1): intcon(0);
Node* end = is_copyOfRange? argument(2): argument(1);
Node* array_type_mirror = is_copyOfRange? argument(3): argument(2);
+ const TypeAryPtr* original_t = _gvn.type(original)->isa_aryptr();
+ const TypeInstPtr* mirror_t = _gvn.type(array_type_mirror)->isa_instptr();
+ if (EnableValhalla && ValueArrayFlatten &&
+ (original_t == NULL || mirror_t == NULL ||
+ (mirror_t->java_mirror_type() == NULL &&
+ (original_t->elem()->isa_valuetype() ||
+ (original_t->elem()->make_oopptr() != NULL &&
+ original_t->elem()->make_oopptr()->can_be_value_type()))))) {
+ // We need to know statically whether the copy is to a flattened array
+ // or not, but here we can't tell: bail out.
+ return false;
+ }
+
Node* newcopy = NULL;
// Set the original stack and the reexecute bit for the interpreter to reexecute
// the bytecode that invokes Arrays.copyOf if deoptimization happens.
{ PreserveReexecuteState preexecs(this);
@@ -3665,20 +3905,62 @@
RegionNode* bailout = new RegionNode(1);
record_for_igvn(bailout);
// Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc.
// Bail out if that is so.
- Node* not_objArray = generate_non_objArray_guard(klass_node, bailout);
+ // A value type array may have object fields that would require a
+ // write barrier. Conservatively go to the slow path.
+ BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+ Node* not_objArray = !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing) ?
+ generate_typeArray_guard(klass_node, bailout) : generate_non_objArray_guard(klass_node, bailout);
if (not_objArray != NULL) {
// Improve the klass node's type from the new optimistic assumption:
ciKlass* ak = ciArrayKlass::make(env()->Object_klass());
- const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/);
+ const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0));
Node* cast = new CastPPNode(klass_node, akls);
cast->init_req(0, control());
klass_node = _gvn.transform(cast);
}
+ Node* original_kls = load_object_klass(original);
+ // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
+ // loads/stores but it is legal only if we're sure the
+ // Arrays.copyOf would succeed. So we need all input arguments
+ // to the copyOf to be validated, including that the copy to the
+ // new array won't trigger an ArrayStoreException. That subtype
+ // check can be optimized if we know something on the type of
+ // the input array from type speculation.
+ if (_gvn.type(klass_node)->singleton() && !stopped()) {
+ ciKlass* subk = _gvn.type(original_kls)->is_klassptr()->klass();
+ ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
+
+ int test = C->static_subtype_check(superk, subk);
+ if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
+ const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
+ if (t_original->speculative_type() != NULL) {
+ original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
+ original_kls = load_object_klass(original);
+ }
+ }
+ }
+
+ if (EnableValhalla) {
+ // The new array klass and the original array klass must either both
+ // be flattened or both not be flattened; otherwise go to the slow path.
+ Node* flattened_klass = generate_valueArray_guard(klass_node, NULL);
+ generate_valueArray_guard(original_kls, bailout);
+ if (flattened_klass != NULL) {
+ RegionNode* r = new RegionNode(2);
+ record_for_igvn(r);
+ r->init_req(1, control());
+ set_control(flattened_klass);
+ generate_valueArray_guard(original_kls, r);
+ bailout->add_req(control());
+ set_control(_gvn.transform(r));
+ }
+ }
+
// Bail out if either start or end is negative.
generate_negative_guard(start, bailout, &start);
generate_negative_guard(end, bailout, &end);
Node* length = end;
@@ -3711,35 +3993,15 @@
// We know the copy is disjoint but we might not know if the
// oop stores need checking.
// Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class).
// This will fail a store-check if x contains any non-nulls.
- // ArrayCopyNode:Ideal may transform the ArrayCopyNode to
- // loads/stores but it is legal only if we're sure the
- // Arrays.copyOf would succeed. So we need all input arguments
- // to the copyOf to be validated, including that the copy to the
- // new array won't trigger an ArrayStoreException. That subtype
- // check can be optimized if we know something on the type of
- // the input array from type speculation.
- if (_gvn.type(klass_node)->singleton()) {
- ciKlass* subk = _gvn.type(load_object_klass(original))->is_klassptr()->klass();
- ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass();
-
- int test = C->static_subtype_check(superk, subk);
- if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) {
- const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr();
- if (t_original->speculative_type() != NULL) {
- original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true);
- }
- }
- }
-
bool validated = false;
// Reason_class_check rather than Reason_intrinsic because we
// want to intrinsify even if this traps.
if (!too_many_traps(Deoptimization::Reason_class_check)) {
- Node* not_subtype_ctrl = gen_subtype_check(load_object_klass(original),
+ Node* not_subtype_ctrl = gen_subtype_check(original_kls,
klass_node);
if (not_subtype_ctrl != top()) {
PreserveJVMState pjvms(this);
set_control(not_subtype_ctrl);
@@ -3752,11 +4014,11 @@
if (!stopped()) {
newcopy = new_array(klass_node, length, 0); // no arguments to push
ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false,
- load_object_klass(original), klass_node);
+ original_kls, klass_node);
if (!is_copyOfRange) {
ac->set_copyof(validated);
} else {
ac->set_copyofrange(validated);
}
@@ -3876,21 +4138,25 @@
RegionNode* result_reg = new RegionNode(PATH_LIMIT);
PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT);
PhiNode* result_io = new PhiNode(result_reg, Type::ABIO);
PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
- Node* obj = NULL;
+ Node* obj = argument(0);
+
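+ // Don't intrinsify if the argument is known to be a value type: value
+ // types do not use the identity hash code in the mark word.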
+ if (obj->is_ValueType() || gvn().type(obj)->is_valuetypeptr()) {
+ return false;
+ }
+
if (!is_static) {
// Check for hashing null object
obj = null_check_receiver();
if (stopped()) return true; // unconditionally null
result_reg->init_req(_null_path, top());
result_val->init_req(_null_path, top());
} else {
// Do a null check, and return zero if null.
// System.identityHashCode(null) == 0
- obj = argument(0);
Node* null_ctl = top();
obj = null_check_oop(obj, &null_ctl);
result_reg->init_req(_null_path, null_ctl);
result_val->init_req(_null_path, _gvn.intcon(0));
}
@@ -3906,10 +4172,17 @@
// We only go to the fast case code if we pass a number of guards. The
// paths which do not pass are accumulated in the slow_region.
RegionNode* slow_region = new RegionNode(1);
record_for_igvn(slow_region);
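+ // The argument of a static identityHashCode call may still turn out to
+ // be a value type at runtime; route value klasses to the slow path.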
+ const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
+ assert(!obj_type->isa_valuetype() || !obj_type->is_valuetypeptr(), "no value type here");
+ if (is_static && obj_type->can_be_value_type()) {
+ Node* obj_klass = load_object_klass(obj);
+ generate_value_guard(obj_klass, slow_region);
+ }
+
// If this is a virtual call, we generate a funny guard. We pull out
// the vtable entry corresponding to hashCode() from the target object.
// If the target method which we are calling happens to be the native
// Object hashCode() method, we pass the guard. We do not need this
// guard for non-virtual calls -- the caller is known to be the native
@@ -3992,11 +4265,17 @@
//---------------------------inline_native_getClass----------------------------
// public final native Class<?> java.lang.Object.getClass();
//
// Build special case code for calls to getClass on an object.
bool LibraryCallKit::inline_native_getClass() {
- Node* obj = null_check_receiver();
+ Node* obj = argument(0);
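+ // getClass() on a scalarized value type folds to the constant value
+ // class mirror.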
+ if (obj->is_ValueType()) {
+ ciKlass* vk = _gvn.type(obj)->is_valuetype()->value_klass();
+ set_result(makecon(TypeInstPtr::make(vk->java_mirror())));
+ return true;
+ }
+ obj = null_check_receiver();
if (stopped()) return true;
set_result(load_mirror_from_klass(load_object_klass(obj)));
return true;
}
@@ -4249,11 +4528,38 @@
// Copy the fastest available way.
// TODO: generate fields copies for small objects instead.
Node* size = _gvn.transform(obj_size);
- access_clone(obj, alloc_obj, size, is_array);
+ // Exclude the header but include the array length, to copy in 8-byte words.
+ // Can't use base_offset_in_bytes(bt) since basic type is unknown.
+ int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
+ instanceOopDesc::base_offset_in_bytes();
+ // base_off:
+ // 8 - 32-bit VM
+ // 12 - 64-bit VM, compressed klass
+ // 16 - 64-bit VM, normal klass
+ if (base_off % BytesPerLong != 0) {
+ assert(UseCompressedClassPointers, "");
+ if (is_array) {
+ // Exclude the length, to copy in 8-byte words.
+ base_off += sizeof(int);
+ } else {
+ // Include klass to copy by 8 bytes words.
+ base_off = instanceOopDesc::klass_offset_in_bytes();
+ }
+ assert(base_off % BytesPerLong == 0, "expect 8-byte alignment");
+ }
+ Node* src_base = basic_plus_adr(obj, base_off);
+ Node* dst_base = basic_plus_adr(alloc_obj, base_off);
+
+ // Compute the length also, if needed:
+ Node* countx = size;
+ countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
+ countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));
+
+ access_clone(src_base, dst_base, countx, is_array);
// Do not let reads from the cloned object float above the arraycopy.
if (alloc != NULL) {
// Do not let stores that initialize this object be reordered with
// a subsequent store that would make this object accessible by
@@ -4292,21 +4598,27 @@
// Set the reexecute bit for the interpreter to reexecute
// the bytecode that invokes Object.clone if deoptimization happens.
{ PreserveReexecuteState preexecs(this);
jvms()->set_should_reexecute(true);
- Node* obj = null_check_receiver();
+ Node* obj = argument(0);
+ if (obj->is_ValueType()) {
+ return false;
+ }
+
+ obj = null_check_receiver();
if (stopped()) return true;
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
// If we are going to clone an instance, we need its exact type to
// know the number and types of fields to convert the clone to
// loads/stores. Maybe a speculative type can help us.
if (!obj_type->klass_is_exact() &&
obj_type->speculative_type() != NULL &&
- obj_type->speculative_type()->is_instance_klass()) {
+ obj_type->speculative_type()->is_instance_klass() &&
+ !obj_type->speculative_type()->is_valuetype()) {
ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass();
if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem &&
!spec_ik->has_injected_fields()) {
ciKlass* k = obj_type->klass();
if (!k->is_instance_klass() ||
@@ -4339,15 +4651,29 @@
result_val = new PhiNode(result_reg, TypeInstPtr::NOTNULL);
PhiNode* result_i_o = new PhiNode(result_reg, Type::ABIO);
PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM);
record_for_igvn(result_reg);
+ // We only go to the fast case code if we pass a number of guards.
+ // The paths which do not pass are accumulated in the slow_region.
+ RegionNode* slow_region = new RegionNode(1);
+ record_for_igvn(slow_region);
+
Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
if (array_ctl != NULL) {
// It's an array.
PreserveJVMState pjvms(this);
set_control(array_ctl);
+
+ BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+ if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) {
+ // A value type array may have object fields that would require a
+ // write barrier. Conservatively go to the slow path.
+ generate_valueArray_guard(obj_klass, slow_region);
+ }
+
+ if (!stopped()) {
Node* obj_length = load_array_length(obj);
Node* obj_size = NULL;
Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
@@ -4356,11 +4682,10 @@
// because gc barriers are required when accessing the array.
Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL);
if (is_obja != NULL) {
PreserveJVMState pjvms2(this);
set_control(is_obja);
- obj = access_resolve(obj, ACCESS_READ);
// Generate a direct call to the right arraycopy function(s).
Node* alloc = tightly_coupled_allocation(alloc_obj, NULL);
ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false);
ac->set_cloneoop();
Node* n = _gvn.transform(ac);
@@ -4371,10 +4696,11 @@
result_val->init_req(_objArray_path, alloc_obj);
result_i_o ->set_req(_objArray_path, i_o());
result_mem ->set_req(_objArray_path, reset_memory());
}
}
+
// Otherwise, there are no barriers to worry about.
// (We can dispense with card marks if we know the allocation
// comes out of eden (TLAB)... In fact, ReduceInitialCardMarks
// causes the non-eden paths to take compensating steps to
// simulate a fresh allocation, so that no further
@@ -4389,15 +4715,12 @@
result_val->init_req(_array_path, alloc_obj);
result_i_o ->set_req(_array_path, i_o());
result_mem ->set_req(_array_path, reset_memory());
}
}
+ }
- // We only go to the instance fast case code if we pass a number of guards.
- // The paths which do not pass are accumulated in the slow_region.
- RegionNode* slow_region = new RegionNode(1);
- record_for_igvn(slow_region);
if (!stopped()) {
// It's an instance (we did array above). Make the slow-path tests.
// If this is a virtual call, we generate a funny guard. We grab
// the vtable entry corresponding to clone() from the target object.
// If the target method which we are calling happens to be the
@@ -4554,15 +4877,14 @@
map()->replaced_nodes().apply(saved_jvms->map(), new_idx);
set_jvms(saved_jvms);
_reexecute_sp = saved_reexecute_sp;
// Remove the allocation from above the guards
- CallProjections callprojs;
- alloc->extract_projections(&callprojs, true);
+ CallProjections* callprojs = alloc->extract_projections(true);
InitializeNode* init = alloc->initialization();
Node* alloc_mem = alloc->in(TypeFunc::Memory);
- C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O));
+ C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O));
C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem);
C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0));
// move the allocation here (after the guards)
_gvn.hash_delete(alloc);
@@ -4570,11 +4892,11 @@
alloc->set_req(TypeFunc::I_O, i_o());
Node *mem = reset_memory();
set_all_memory(mem);
alloc->set_req(TypeFunc::Memory, mem);
set_control(init->proj_out_or_null(TypeFunc::Control));
- set_i_o(callprojs.fallthrough_ioproj);
+ set_i_o(callprojs->fallthrough_ioproj);
// Update memory as done in GraphKit::set_output_for_allocation()
const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength));
const TypeOopPtr* ary_type = _gvn.type(alloc->in(AllocateNode::KlassNode))->is_klassptr()->as_instance_type();
if (ary_type->isa_aryptr() && length_type != NULL) {
@@ -4814,21 +5136,37 @@
set_control(not_subtype_ctrl);
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_make_not_entrant);
assert(stopped(), "Should be stopped");
}
+
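+    // The subtype check above validated the copy: sharpen the type of src
+    // accordingly, and send copies that may involve a flattened value type
+    // array to the slow path.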
+ const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
+ const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
+ src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
+
+ src_type = _gvn.type(src);
+ top_src = src_type->isa_aryptr();
+
+ if (top_dest != NULL &&
+ top_dest->elem()->make_oopptr() != NULL &&
+ top_dest->elem()->make_oopptr()->can_be_value_type()) {
+ generate_valueArray_guard(load_object_klass(dest), slow_region);
+ }
+
+ if (top_src != NULL &&
+ top_src->elem()->make_oopptr() != NULL &&
+ top_src->elem()->make_oopptr()->can_be_value_type()) {
+ generate_valueArray_guard(load_object_klass(src), slow_region);
+ }
+
{
PreserveJVMState pjvms(this);
set_control(_gvn.transform(slow_region));
uncommon_trap(Deoptimization::Reason_intrinsic,
Deoptimization::Action_make_not_entrant);
assert(stopped(), "Should be stopped");
}
-
- const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr();
- const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass());
- src = _gvn.transform(new CheckCastPPNode(control(), src, toop));
}
arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
if (stopped()) {