< prev index next >
src/hotspot/share/opto/graphKit.cpp
Print this page
@@ -23,10 +23,11 @@
*/
#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
#include "compiler/compileLog.hpp"
+#include "ci/ciValueKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
@@ -35,28 +36,38 @@
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
+#include "opto/narrowptrnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
+#include "opto/valuetypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
-GraphKit::GraphKit(JVMState* jvms)
+// 'gvn' optionally selects the PhaseGVN to build IR under (e.g. an IterGVN in
+// delayed-transform mode during incremental inlining); NULL falls back to the
+// compile's initial GVN.
+GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
: Phase(Phase::Parser),
_env(C->env()),
- _gvn(*C->initial_gvn()),
+ _gvn((gvn != NULL) ? *gvn : *C->initial_gvn()),
_barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
+ // IterGVN callers must have enabled delayed transformation up front.
+ assert(gvn == NULL || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled")
;
+ // Detach any pending exception states from the incoming map; this kit owns them now.
_exceptions = jvms->map()->next_exception();
if (_exceptions != NULL) jvms->map()->set_next_exception(NULL);
set_jvms(jvms);
+#ifdef ASSERT
+ if (_gvn.is_IterGVN() != NULL) {
+ assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
+ // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
+ _worklist_size = _gvn.C->for_igvn()->size();
+ }
+#endif
}
// Private constructor for parser.
GraphKit::GraphKit()
: Phase(Phase::Parser),
@@ -821,20 +832,21 @@
ciMethod* cur_method = jvms->method();
int cur_bci = jvms->bci();
if (cur_method != NULL && cur_bci != InvocationEntryBci) {
Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
return Interpreter::bytecode_should_reexecute(code) ||
- (is_anewarray && code == Bytecodes::_multianewarray);
+ (is_anewarray && (code == Bytecodes::_multianewarray));
// Reexecute _multianewarray bytecode which was replaced with
// sequence of [a]newarray. See Parse::do_multianewarray().
//
// Note: interpreter should not have it set since this optimization
// is limited by dimensions and guarded by flag so in some cases
// multianewarray() runtime calls will be generated and
// the bytecode should not be reexecutes (stack will not be reset).
- } else
+ } else {
return false;
+ }
}
// Helper function for adding JVMState and debug information to node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
// Add the safepoint edges to the call (or other safepoint).
@@ -1074,10 +1086,19 @@
assert(rsize == 1, "");
depth = rsize - inputs;
}
break;
+ case Bytecodes::_withfield: {
+ bool ignored_will_link;
+ ciField* field = method()->get_field_at_bci(bci(), ignored_will_link);
+ int size = field->type()->size();
+ inputs = size+1;
+ depth = rsize - inputs;
+ break;
+ }
+
case Bytecodes::_ireturn:
case Bytecodes::_lreturn:
case Bytecodes::_freturn:
case Bytecodes::_dreturn:
case Bytecodes::_areturn:
@@ -1199,10 +1220,11 @@
// Construct NULL check
Node *chk = NULL;
switch(type) {
case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
+ case T_VALUETYPE : // fall through
case T_ARRAY : // fall through
type = T_OBJECT; // simplify further tests
case T_OBJECT : {
const Type *t = _gvn.type( value );
@@ -1370,14 +1392,32 @@
}
return value;
}
+// Replace a null 'value' with the default-value oop of value klass 'vk'.
+// Emits a null check; if the null path is reachable, merges the not-null oop
+// and the default oop through a Region/Phi and returns the transformed Phi,
+// otherwise returns 'value' unchanged.
+Node* GraphKit::null2default(Node* value, ciValueKlass* vk) {
+ Node* null_ctl = top();
+ value = null_check_oop(value, &null_ctl);
+ if (!null_ctl->is_top()) {
+ // Return default value if oop is null
+ Node* region = new RegionNode(3);
+ region->init_req(1, control());
+ region->init_req(2, null_ctl);
+ value = PhiNode::make(region, value, TypeInstPtr::make(TypePtr::BotPTR, vk));
+ value->set_req(2, ValueTypeNode::default_oop(gvn(), vk));
+ set_control(gvn().transform(region));
+ value = gvn().transform(value);
+ }
+ return value;
+}
//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
+ if (obj->is_ValueType()) {
+ return obj;
+ }
const Type *t = _gvn.type(obj);
const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
// Object is already not-null?
if( t == t_not_null ) return obj;
@@ -1502,11 +1542,12 @@
ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe);
} else {
ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe);
}
ld = _gvn.transform(ld);
- if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
+
+ if (((bt == T_OBJECT || bt == T_VALUETYPE) && C->do_escape_analysis()) || C->eliminate_boxing()) {
// Improve graph before escape analysis and boxing elimination.
record_for_igvn(ld);
}
return ld;
}
@@ -1553,11 +1594,12 @@
Node* adr,
const TypePtr* adr_type,
Node* val,
const Type* val_type,
BasicType bt,
- DecoratorSet decorators) {
+ DecoratorSet decorators,
+ bool deoptimize_on_exception) {
// Transformation of a value which could be NULL pointer (CastPP #NULL)
// could be delayed during Parse (for example, in adjust_map_after_if()).
// Execute transformation here to avoid barrier generation in such case.
if (_gvn.type(val) == TypePtr::NULL_PTR) {
val = _gvn.makecon(TypePtr::NULL_PTR);
@@ -1566,10 +1608,14 @@
if (stopped()) {
return top(); // Dead path ?
}
assert(val != NULL, "not dead path");
+ if (val->is_ValueType()) {
+ // Allocate value type and get oop
+ val = val->as_ValueType()->allocate(this, deoptimize_on_exception)->get_oop();
+ }
C2AccessValuePtr addr(adr, adr_type);
C2AccessValue value(val, val_type);
C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
if (access.is_raw()) {
@@ -1686,12 +1732,12 @@
} else {
return _barrier_set->atomic_add_at(access, new_val, value_type);
}
}
+// Delegate object/array cloning to the GC barrier set so any required GC
+// barriers are emitted. NOTE(review): the renamed 'countx' parameter is the
+// copy length as interpreted by BarrierSetC2::clone — confirm its unit there.
-void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
- return _barrier_set->clone(this, src, dst, size, is_array);
+void GraphKit::access_clone(Node* src_base, Node* dst_base, Node* countx, bool is_array) {
+ return _barrier_set->clone(this, src_base, dst_base, countx, is_array);
}
Node* GraphKit::access_resolve(Node* n, DecoratorSet decorators) {
// Use stronger ACCESS_WRITE|ACCESS_READ by default.
if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
@@ -1702,10 +1748,15 @@
//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
const TypeInt* sizetype, Node* ctrl) {
uint shift = exact_log2(type2aelembytes(elembt));
+ ciKlass* arytype_klass = _gvn.type(ary)->is_aryptr()->klass();
+ if (arytype_klass->is_value_array_klass()) {
+ ciValueArrayKlass* vak = arytype_klass->as_value_array_klass();
+ shift = vak->log2_element_size();
+ }
uint header = arrayOopDesc::base_offset_in_bytes(elembt);
// short-circuit a common case (saves lots of confusing waste motion)
jint idx_con = find_int_con(idx, -1);
if (idx_con >= 0) {
@@ -1722,26 +1773,60 @@
//-------------------------load_array_element-------------------------
// Load the element at 'idx' from array 'ary' using a plain (unordered) load.
// Flattened value type elements are explicitly unsupported here (see assert);
// callers must handle T_VALUETYPE arrays separately.
Node* GraphKit::load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype) {
const Type* elemtype = arytype->elem();
BasicType elembt = elemtype->array_element_basic_type();
+ assert(elembt != T_VALUETYPE, "value types are not supported by this method");
Node* adr = array_element_address(ary, idx, elembt, arytype->size());
if (elembt == T_NARROWOOP) {
elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
}
Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
return ld;
}
//-------------------------set_arguments_for_java_call-------------------------
// Arguments (pre-popped from the stack) are taken from the JVMS.
+// Fill in the argument inputs of 'call' from the current JVMS. Value type
+// arguments are either scalarized (each field passed separately, per the
+// callee's extended calling convention 'sig_cc') or buffered to an oop.
+// 'incremental_inlining' selects ValueTypePtrNode buffering instead of an
+// immediate allocation so late inlining can still scalarize.
-void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
+void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool incremental_inlining) {
// Add the call arguments:
- uint nargs = call->method()->arg_size();
- for (uint i = 0; i < nargs; i++) {
- Node* arg = argument(i);
- call->init_req(i + TypeFunc::Parms, arg);
+ const TypeTuple* domain = call->tf()->domain_sig();
+ ExtendedSignature sig_cc = ExtendedSignature(call->method()->get_sig_cc(), SigEntryFilter());
+ uint nargs = domain->cnt();
+ // 'i' walks the declared signature, 'idx' walks the (possibly wider) call inputs.
+ for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
+ Node* arg = argument(i-TypeFunc::Parms);
+ const Type* t = domain->field_at(i);
+ if (call->method()->has_scalarized_args() && t->is_valuetypeptr() && !t->maybe_null()) {
+ // We don't pass value type arguments by reference but instead
+ // pass each field of the value type
+ ValueTypeNode* vt = arg->isa_ValueType();
+ if (vt == NULL) {
+ // TODO why is that?? Shouldn't we always see a valuetype node here?
+ vt = ValueTypeNode::make_from_oop(this, arg, t->value_klass());
+ }
+ vt->pass_fields(this, call, sig_cc, idx);
+ // If a value type argument is passed as fields, attach the Method* to the call site
+ // to be able to access the extended signature later via attached_method_before_pc().
+ // For example, see CompiledMethod::preserve_callee_argument_oops().
+ call->set_override_symbolic_info(true);
+ continue;
+ } else if (arg->is_ValueType()) {
+ // Pass value type argument via oop to callee
+ if (!incremental_inlining) {
+ arg = arg->as_ValueType()->allocate(this)->get_oop();
+ } else {
+ arg = ValueTypePtrNode::make_from_value_type(this, arg->as_ValueType(), false);
+ }
+ }
+ call->init_req(idx++, arg);
+ // Skip reserved arguments
+ BasicType bt = t->basic_type();
+ while (SigEntry::next_is_reserved(sig_cc, bt, true)) {
+ // Reserved slots are filled with top (unused inputs); doubles/longs take two.
+ call->init_req(idx++, top());
+ if (type2size[bt] == 2) {
+ call->init_req(idx++, top());
+ }
+ }
}
}
//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
@@ -1775,17 +1860,10 @@
}
Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
if (stopped()) return top(); // maybe the call folded up?
- // Capture the return value, if any.
- Node* ret;
- if (call->method() == NULL ||
- call->method()->return_type()->basic_type() == T_VOID)
- ret = top();
- else ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
-
// Note: Since any out-of-line call can produce an exception,
// we always insert an I_O projection from the call into the result.
make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
@@ -1794,10 +1872,29 @@
// through and exceptional paths, so replace the projections for
// the fall through path.
set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
}
+
+ // Capture the return value, if any.
+ Node* ret;
+ if (call->method() == NULL || call->method()->return_type()->basic_type() == T_VOID) {
+ ret = top();
+ } else if (call->tf()->returns_value_type_as_fields()) {
+ // Return of multiple values (value type fields): we create a
+ // ValueType node, each field is a projection from the call.
+ ciValueKlass* vk = call->method()->return_type()->as_value_klass();
+ const Array<SigEntry>* sig_array = vk->extended_sig();
+ GrowableArray<SigEntry> sig = GrowableArray<SigEntry>(sig_array->length());
+ sig.appendAll(sig_array);
+ ExtendedSignature sig_cc = ExtendedSignature(&sig, SigEntryFilter());
+ uint base_input = TypeFunc::Parms + 1;
+ ret = ValueTypeNode::make_from_multi(this, call, sig_cc, vk, base_input, false);
+ } else {
+ ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
+ }
+
return ret;
}
//--------------------set_predefined_input_for_runtime_call--------------------
// Reading and setting the memory state is way conservative here.
@@ -1872,76 +1969,76 @@
Node* ex_ctl = top();
SafePointNode* final_state = stop();
// Find all the needed outputs of this call
- CallProjections callprojs;
- call->extract_projections(&callprojs, true);
+ CallProjections* callprojs = call->extract_projections(true);
Node* init_mem = call->in(TypeFunc::Memory);
Node* final_mem = final_state->in(TypeFunc::Memory);
Node* final_ctl = final_state->in(TypeFunc::Control);
Node* final_io = final_state->in(TypeFunc::I_O);
// Replace all the old call edges with the edges from the inlining result
- if (callprojs.fallthrough_catchproj != NULL) {
- C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
+ if (callprojs->fallthrough_catchproj != NULL) {
+ C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
}
- if (callprojs.fallthrough_memproj != NULL) {
+ if (callprojs->fallthrough_memproj != NULL) {
if (final_mem->is_MergeMem()) {
// Parser's exits MergeMem was not transformed but may be optimized
final_mem = _gvn.transform(final_mem);
}
- C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem);
+ C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem);
}
- if (callprojs.fallthrough_ioproj != NULL) {
- C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io);
+ if (callprojs->fallthrough_ioproj != NULL) {
+ C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io);
}
// Replace the result with the new result if it exists and is used
- if (callprojs.resproj != NULL && result != NULL) {
- C->gvn_replace_by(callprojs.resproj, result);
+ if (callprojs->resproj[0] != NULL && result != NULL) {
+ assert(callprojs->nb_resproj == 1, "unexpected number of results");
+ C->gvn_replace_by(callprojs->resproj[0], result);
}
if (ejvms == NULL) {
// No exception edges to simply kill off those paths
- if (callprojs.catchall_catchproj != NULL) {
- C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
+ if (callprojs->catchall_catchproj != NULL) {
+ C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
}
- if (callprojs.catchall_memproj != NULL) {
- C->gvn_replace_by(callprojs.catchall_memproj, C->top());
+ if (callprojs->catchall_memproj != NULL) {
+ C->gvn_replace_by(callprojs->catchall_memproj, C->top());
}
- if (callprojs.catchall_ioproj != NULL) {
- C->gvn_replace_by(callprojs.catchall_ioproj, C->top());
+ if (callprojs->catchall_ioproj != NULL) {
+ C->gvn_replace_by(callprojs->catchall_ioproj, C->top());
}
// Replace the old exception object with top
- if (callprojs.exobj != NULL) {
- C->gvn_replace_by(callprojs.exobj, C->top());
+ if (callprojs->exobj != NULL) {
+ C->gvn_replace_by(callprojs->exobj, C->top());
}
} else {
GraphKit ekit(ejvms);
// Load my combined exception state into the kit, with all phis transformed:
SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
replaced_nodes_exception = ex_map->replaced_nodes();
Node* ex_oop = ekit.use_exception_state(ex_map);
- if (callprojs.catchall_catchproj != NULL) {
- C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
+ if (callprojs->catchall_catchproj != NULL) {
+ C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
ex_ctl = ekit.control();
}
- if (callprojs.catchall_memproj != NULL) {
- C->gvn_replace_by(callprojs.catchall_memproj, ekit.reset_memory());
+ if (callprojs->catchall_memproj != NULL) {
+ C->gvn_replace_by(callprojs->catchall_memproj, ekit.reset_memory());
}
- if (callprojs.catchall_ioproj != NULL) {
- C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o());
+ if (callprojs->catchall_ioproj != NULL) {
+ C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o());
}
// Replace the old exception object with the newly created one
- if (callprojs.exobj != NULL) {
- C->gvn_replace_by(callprojs.exobj, ex_oop);
+ if (callprojs->exobj != NULL) {
+ C->gvn_replace_by(callprojs->exobj, ex_oop);
}
}
// Disconnect the call from the graph
call->disconnect_inputs(NULL, C);
@@ -1960,11 +2057,11 @@
while (wl.size() > 0) {
_gvn.transform(wl.pop());
}
}
- if (callprojs.fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
+ if (callprojs->fallthrough_catchproj != NULL && !final_ctl->is_top() && do_replaced_nodes) {
replaced_nodes.apply(C, final_ctl);
}
if (!ex_ctl->is_top() && do_replaced_nodes) {
replaced_nodes_exception.apply(C, ex_ctl);
}
@@ -2137,13 +2234,13 @@
void GraphKit::round_double_arguments(ciMethod* dest_method) {
// (Note: TypeFunc::make has a cache that makes this fast.)
const TypeFunc* tf = TypeFunc::make(dest_method);
- int nargs = tf->domain()->cnt() - TypeFunc::Parms;
+ int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
for (int j = 0; j < nargs; j++) {
- const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
+ const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
if( targ->basic_type() == T_DOUBLE ) {
// If any parameters are doubles, they must be rounded before
// the call, dstore_rounding does gvn.transform
Node *arg = argument(j);
arg = dstore_rounding(arg);
@@ -2196,11 +2293,11 @@
}
if (speculative != current_type->speculative()) {
// Build a type with a speculative type (what we think we know
// about the type but will need a guard when we use it)
- const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
+ const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
// We're changing the type, we need a new CheckCast node to carry
// the new type. The new type depends on the control: what
// profiling tells us is only valid from here as far as we can
// tell.
Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
@@ -2261,15 +2358,15 @@
void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
if (!UseTypeSpeculation) {
return;
}
const TypeFunc* tf = TypeFunc::make(dest_method);
- int nargs = tf->domain()->cnt() - TypeFunc::Parms;
+ int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
- const Type *targ = tf->domain()->field_at(j + TypeFunc::Parms);
- if (targ->basic_type() == T_OBJECT || targ->basic_type() == T_ARRAY) {
+ const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
+ if (targ->isa_oopptr()) {
ProfilePtrKind ptr_kind = ProfileMaybeNull;
ciKlass* better_type = NULL;
if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
record_profile_for_speculation(argument(j), better_type, ptr_kind);
}
@@ -2778,29 +2875,40 @@
// Emit an exact-klass equality check of 'receiver' against 'klass' with branch
// probability 'prob'. On success, control continues on the current path and
// '*casted_receiver' is the receiver cast to the exact type (scalarized to a
// ValueTypeNode when the klass is a scalarizable value klass). The failing
// control projection is returned.
Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
float prob,
Node* *casted_receiver) {
const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
Node* recv_klass = load_object_klass(receiver);
- Node* want_klass = makecon(tklass);
- Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass) );
- Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) );
- IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
- set_control( _gvn.transform( new IfTrueNode (iff) ));
- Node* fail = _gvn.transform( new IfFalseNode(iff) );
-
+ Node* fail = type_check(recv_klass, tklass, prob);
const TypeOopPtr* recv_xtype = tklass->as_instance_type();
assert(recv_xtype->klass_is_exact(), "");
// Subsume downstream occurrences of receiver with a cast to
// recv_xtype, since now we know what the type will be.
Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
- (*casted_receiver) = _gvn.transform(cast);
+ Node* res = _gvn.transform(cast);
+ if (recv_xtype->is_valuetypeptr() && recv_xtype->value_klass()->is_scalarizable()) {
+ // The check passed, so the receiver is a known non-null value object;
+ // replace the oop with its scalarized form for downstream optimization.
+ assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
+ res = ValueTypeNode::make_from_oop(this, res, recv_xtype->value_klass());
+ }
+
+ (*casted_receiver) = res;
// (User must make the replace_in_map call.)
return fail;
}
+// Compare 'recv_klass' for pointer equality against the constant klass 'tklass'.
+// On equality, control continues on the true projection (probability 'prob');
+// the false (mismatch) projection is returned to the caller.
+Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
+ float prob) {
+ Node* want_klass = makecon(tklass);
+ Node* cmp = _gvn.transform( new CmpPNode(recv_klass, want_klass));
+ Node* bol = _gvn.transform( new BoolNode(cmp, BoolTest::eq) );
+ IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
+ set_control( _gvn.transform( new IfTrueNode (iff)));
+ Node* fail = _gvn.transform( new IfFalseNode(iff));
+ return fail;
+}
+
//------------------------------subtype_check_receiver-------------------------
Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
Node** casted_receiver) {
const TypeKlassPtr* tklass = TypeKlassPtr::make(klass);
Node* recv_klass = load_object_klass(receiver);
@@ -2967,14 +3075,15 @@
data = method()->method_data()->bci_to_data(bci());
}
bool speculative_not_null = false;
bool never_see_null = (ProfileDynamicTypes // aggressive use of profile
&& seems_never_null(obj, data, speculative_not_null));
+ bool is_value = obj->is_ValueType();
// Null check; get casted pointer; set region slot 3
Node* null_ctl = top();
- Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
+ Node* not_null_obj = is_value ? obj : null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a NULL?
set_control(null_ctl);
return intcon(0);
@@ -2988,10 +3097,11 @@
region->del_req(_null_path);
phi ->del_req(_null_path);
}
// Do we know the type check always succeed?
+ if (!is_value) {
bool known_statically = false;
if (_gvn.type(superklass)->singleton()) {
ciKlass* superk = _gvn.type(superklass)->is_klassptr()->klass();
ciKlass* subk = _gvn.type(obj)->is_oopptr()->klass();
if (subk != NULL && subk->is_loaded()) {
@@ -3009,18 +3119,27 @@
Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, NULL, spec_obj_type, safe_for_replace);
if (stopped()) { // Profile disagrees with this path.
set_control(null_ctl); // Null is the only remaining possibility.
return intcon(0);
}
- if (cast_obj != NULL) {
+ if (cast_obj != NULL &&
+ // A value that's sometimes null is not something we can optimize well
+ !(cast_obj->is_ValueType() && null_ctl != top())) {
not_null_obj = cast_obj;
+ is_value = not_null_obj->is_ValueType();
+ }
}
}
}
// Load the object's klass
- Node* obj_klass = load_object_klass(not_null_obj);
+ Node* obj_klass = NULL;
+ if (is_value) {
+ obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->is_valuetype()->value_klass()));
+ } else {
+ obj_klass = load_object_klass(not_null_obj);
+ }
// Generate the subtype check
Node* not_subtype_ctrl = gen_subtype_check(obj_klass, superklass);
// Plug in the success path to the general merge in slot 1.
@@ -3036,11 +3155,11 @@
record_for_igvn(region);
// If we know the type check always succeeds then we don't use the
// profiling data at this bytecode. Don't lose it, feed it to the
// type system as a speculative type.
- if (safe_for_replace) {
+ if (safe_for_replace && !is_value) {
Node* casted_obj = record_profiled_receiver_for_speculation(obj);
replace_in_map(obj, casted_obj);
}
return _gvn.transform(phi);
@@ -3051,38 +3170,64 @@
// array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
// uncommon-trap paths work. Adjust stack after this call.
// If failure_control is supplied and not null, it is filled in with
// the control edge for the cast failure. Otherwise, an appropriate
// uncommon trap or exception is thrown.
-Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
- Node* *failure_control) {
+Node* GraphKit::gen_checkcast(Node *obj, Node* superklass, Node* *failure_control, bool never_null) {
kill_dead_locals(); // Benefit all the uncommon traps
- const TypeKlassPtr *tk = _gvn.type(superklass)->is_klassptr();
- const Type *toop = TypeOopPtr::make_from_klass(tk->klass());
+ const TypeKlassPtr* tk = _gvn.type(superklass)->is_klassptr();
+ const TypeOopPtr* toop = TypeOopPtr::make_from_klass(tk->klass());
+ assert(!never_null || toop->is_valuetypeptr(), "must be a value type pointer");
+ bool is_value = obj->is_ValueType();
// Fast cutout: Check the case that the cast is vacuously true.
// This detects the common cases where the test will short-circuit
// away completely. We do this before we perform the null check,
// because if the test is going to turn into zero code, we don't
// want a residual null check left around. (Causes a slowdown,
// for example, in some objArray manipulations, such as a[i]=a[j].)
if (tk->singleton()) {
+ ciKlass* klass = NULL;
+ if (is_value) {
+ klass = _gvn.type(obj)->is_valuetype()->value_klass();
+ } else {
const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
- if (objtp != NULL && objtp->klass() != NULL) {
- switch (C->static_subtype_check(tk->klass(), objtp->klass())) {
+ if (objtp != NULL) {
+ klass = objtp->klass();
+ }
+ }
+ if (klass != NULL) {
+ switch (C->static_subtype_check(tk->klass(), klass)) {
case Compile::SSC_always_true:
// If we know the type check always succeed then we don't use
// the profiling data at this bytecode. Don't lose it, feed it
// to the type system as a speculative type.
- return record_profiled_receiver_for_speculation(obj);
+ if (!is_value) {
+ obj = record_profiled_receiver_for_speculation(obj);
+ if (never_null) {
+ obj = null_check(obj);
+ }
+ if (toop->is_valuetypeptr() && toop->value_klass()->is_scalarizable() && !gvn().type(obj)->maybe_null()) {
+ obj = ValueTypeNode::make_from_oop(this, obj, toop->value_klass());
+ }
+ }
+ return obj;
case Compile::SSC_always_false:
+ if (is_value || never_null) {
+ if (!is_value) {
+ null_check(obj);
+ }
+ // Value type is never null. Always throw an exception.
+ builtin_throw(Deoptimization::Reason_class_check, makecon(TypeKlassPtr::make(klass)));
+ return top();
+ } else {
// It needs a null check because a null will *pass* the cast check.
- // A non-null value will always produce an exception.
return null_assert(obj);
}
}
}
+ }
ciProfileData* data = NULL;
bool safe_for_replace = false;
if (failure_control == NULL) { // use MDO in regular case only
assert(java_bc() == Bytecodes::_aastore ||
@@ -3103,11 +3248,18 @@
bool never_see_null = ((failure_control == NULL) // regular case only
&& seems_never_null(obj, data, speculative_not_null));
// Null check; get casted pointer; set region slot 3
Node* null_ctl = top();
- Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
+ Node* not_null_obj = NULL;
+ if (is_value) {
+ not_null_obj = obj;
+ } else if (never_null) {
+ not_null_obj = null_check(obj);
+ } else {
+ not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
+ }
// If not_null_obj is dead, only null-path is taken
if (stopped()) { // Doing instance-of on a NULL?
set_control(null_ctl);
return null();
@@ -3121,21 +3273,28 @@
region->del_req(_null_path);
phi ->del_req(_null_path);
}
Node* cast_obj = NULL;
- if (tk->klass_is_exact()) {
+ if (!is_value && tk->klass_is_exact()) {
// The following optimization tries to statically cast the speculative type of the object
// (for example obtained during profiling) to the type of the superklass and then do a
// dynamic check that the type of the object is what we expect. To work correctly
// for checkcast and aastore the type of superklass should be exact.
const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
// We may not have profiling here or it may not help us. If we have
// a speculative type use it to perform an exact cast.
ciKlass* spec_obj_type = obj_type->speculative_type();
if (spec_obj_type != NULL || data != NULL) {
cast_obj = maybe_cast_profiled_receiver(not_null_obj, tk->klass(), spec_obj_type, safe_for_replace);
+ if (cast_obj != NULL && cast_obj->is_ValueType()) {
+ if (null_ctl != top()) {
+ cast_obj = NULL; // A value that's sometimes null is not something we can optimize well
+ } else {
+ return cast_obj;
+ }
+ }
if (cast_obj != NULL) {
if (failure_control != NULL) // failure is now impossible
(*failure_control) = top();
// adjust the type of the phi to the exact klass:
phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
@@ -3143,17 +3302,22 @@
}
}
if (cast_obj == NULL) {
// Load the object's klass
- Node* obj_klass = load_object_klass(not_null_obj);
+ Node* obj_klass = NULL;
+ if (is_value) {
+ obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->is_valuetype()->value_klass()));
+ } else {
+ obj_klass = load_object_klass(not_null_obj);
+ }
// Generate the subtype check
Node* not_subtype_ctrl = gen_subtype_check( obj_klass, superklass );
// Plug in success path into the merge
- cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
+ cast_obj = is_value ? not_null_obj : _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
// Failure path ends in uncommon trap (or may be dead - failure impossible)
if (failure_control == NULL) {
if (not_subtype_ctrl != top()) { // If failure is possible
PreserveJVMState pjvms(this);
set_control(not_subtype_ctrl);
@@ -3182,13 +3346,108 @@
// Return final merged results
set_control( _gvn.transform(region) );
record_for_igvn(region);
- return record_profiled_receiver_for_speculation(res);
+ if (!is_value) {
+ res = record_profiled_receiver_for_speculation(res);
+ if (toop->is_valuetypeptr() && toop->value_klass()->is_scalarizable() && !gvn().type(res)->maybe_null()) {
+ res = ValueTypeNode::make_from_oop(this, res, toop->value_klass());
+ }
+ }
+ return res;
+}
+
+// Load the mark word of 'obj' and AND it with the "always locked" bit pattern
+// used to tag value type instances. The result equals the pattern iff all of
+// its bits are set in the mark word (callers compare against the pattern, see
+// gen_value_type_guard).
+Node* GraphKit::is_always_locked(Node* obj) {
+ Node* mark_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
+ Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
+ Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
+ return _gvn.transform(new AndXNode(mark, value_mask));
+}
+
+// Test whether klass 'kls' has the JVM_ACC_VALUE access flag, i.e. is a value
+// klass. Returns a CmpI of (access_flags & JVM_ACC_VALUE) against 0; the
+// caller attaches the BoolTest condition it needs.
+Node* GraphKit::gen_value_type_test(Node* kls) {
+ Node* flags_addr = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
+ Node* flags = make_load(NULL, flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
+ Node* is_value = _gvn.transform(new AndINode(flags, intcon(JVM_ACC_VALUE)));
+ Node* cmp = _gvn.transform(new CmpINode(is_value, intcon(0)));
+ return cmp;
+}
+
+// Deoptimize if 'obj' is a value type
+// 'nargs' stack slots are restored (inc_sp) before the trap so the interpreter
+// state at the uncommon trap includes the original operands.
+void GraphKit::gen_value_type_guard(Node* obj, int nargs) {
+ assert(EnableValhalla, "should only be used if value types are enabled");
+ Node* bol = NULL;
+ if (obj->is_ValueTypeBase()) {
+ // Statically known to be a value type: the guard condition is constant
+ // false, so the cutout below always traps.
+ bol = intcon(0);
+ } else {
+ // Dynamically test the mark word for the always-locked value type pattern.
+ Node* is_value = is_always_locked(obj);
+ Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern);
+ Node* cmp = _gvn.transform(new CmpXNode(is_value, value_mask));
+ bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
+ }
+ { BuildCutout unless(this, bol, PROB_MAX);
+ inc_sp(nargs);
+ uncommon_trap(Deoptimization::Reason_class_check,
+ Deoptimization::Action_none);
+ }
+}
+
+// Deoptimize if 'ary' is flattened or if 'obj' is null and 'ary' is a value type array
+// 'nargs' stack slots are restored (inc_sp) before the trap so the interpreter
+// re-executes with the original operands.
+void GraphKit::gen_value_type_array_guard(Node* ary, Node* obj, int nargs) {
+ assert(EnableValhalla, "should only be used if value types are enabled");
+ // Load array element klass
+ Node* kls = load_object_klass(ary);
+ Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
+ Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
+ // Check if element is a value type
+ Node* flags_addr = basic_plus_adr(elem_klass, in_bytes(Klass::access_flags_offset()));
+ Node* flags = make_load(NULL, flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
+ Node* is_value_elem = _gvn.transform(new AndINode(flags, intcon(JVM_ACC_VALUE)));
+
+ const Type* objtype = _gvn.type(obj);
+ if (objtype == TypePtr::NULL_PTR) {
+ // Object is always null, check if array is a value type array
+ Node* cmp = _gvn.transform(new CmpINode(is_value_elem, intcon(0)));
+ Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
+ { BuildCutout unless(this, bol, PROB_MAX);
+ // TODO just deoptimize for now if we store null to a value type array
+ inc_sp(nargs);
+ uncommon_trap(Deoptimization::Reason_array_check,
+ Deoptimization::Action_none);
+ }
+ } else {
+ // Check if (is_value_elem && obj_is_null) <=> (!is_value_elem | !obj_is_null == 0)
+ // TODO what if we later figure out that obj is never null?
+ // Branch-free combination: XOR turns "is value elem" into 0, the raw oop
+ // bits are 0 only for null, so OR-ing both is 0 exactly in the trap case.
+ Node* not_value = _gvn.transform(new XorINode(is_value_elem, intcon(JVM_ACC_VALUE)));
+ not_value = _gvn.transform(new ConvI2LNode(not_value));
+ Node* not_null = _gvn.transform(new CastP2XNode(NULL, obj));
+ Node* both = _gvn.transform(new OrLNode(not_null, not_value));
+ Node* cmp = _gvn.transform(new CmpLNode(both, longcon(0)));
+ Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
+ { BuildCutout unless(this, bol, PROB_MAX);
+ // TODO just deoptimize for now if we store null to a value type array
+ inc_sp(nargs);
+ uncommon_trap(Deoptimization::Reason_array_check,
+ Deoptimization::Action_none);
+ }
+ }
}
+// Load the layout helper of klass 'kls' and extract its array tag bits via a
+// signed right shift by _lh_array_tag_shift.
+Node* GraphKit::load_lh_array_tag(Node* kls) {
+ Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
+ Node* layout_val = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
+ return _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift)));
+}
+
+
+// Compare the layout-helper array tag of klass 'kls' against 'lh_value'.
+// Returns the CmpI node; the caller attaches the BoolTest condition.
+Node* GraphKit::gen_lh_array_test(Node* kls, unsigned int lh_value) {
+ Node* layout_val = load_lh_array_tag(kls);
+ Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(lh_value)));
+ return cmp;
+}
+
+
//------------------------------next_monitor-----------------------------------
// What number should be given to the next monitor?
int GraphKit::next_monitor() {
int current = jvms()->monitor_depth()* C->sync_stack_slots();
int next = current + C->sync_stack_slots();
@@ -3250,10 +3509,17 @@
// %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
assert(SynchronizationEntryBCI == InvocationEntryBci, "");
if( !GenerateSynchronizationCode )
return NULL; // Not locking things?
+
+ // We cannot lock on a value type
+ const TypeOopPtr* objptr = _gvn.type(obj)->make_oopptr();
+ if (objptr->can_be_value_type()) {
+ gen_value_type_guard(obj, 1);
+ }
+
if (stopped()) // Dead monitor?
return NULL;
assert(dead_locals_are_killed(), "should kill locals before sync. point");
@@ -3324,10 +3590,11 @@
return;
if (stopped()) { // Dead monitor?
map()->pop_monitor(); // Kill monitor from debug info
return;
}
+ assert(!obj->is_ValueTypeBase(), "should not unlock on value type");
// Memory barrier to avoid floating things down past the locked region
insert_mem_bar(Op_MemBarReleaseLock);
const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
@@ -3364,12 +3631,18 @@
// almost always feature constant types.
Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
if (!StressReflectiveCode && inst_klass != NULL) {
ciKlass* klass = inst_klass->klass();
+ assert(klass != NULL, "klass should not be NULL");
bool xklass = inst_klass->klass_is_exact();
- if (xklass || klass->is_array_klass()) {
+ bool can_be_value_array = false;
+ if (klass->is_array_klass() && EnableValhalla && ValueArrayFlatten) {
+ ciKlass* elem = klass->as_array_klass()->element_klass();
+ can_be_value_array = elem != NULL && (elem->is_java_lang_Object() || elem->is_interface());
+ }
+ if (xklass || (klass->is_array_klass() && !can_be_value_array)) {
jint lhelper = klass->layout_helper();
if (lhelper != Klass::_lh_neutral_value) {
constant_value = lhelper;
return (Node*) NULL;
}
@@ -3427,21 +3700,38 @@
// and link them properly (as a group) to the InitializeNode.
assert(init->in(InitializeNode::Memory) == malloc, "");
MergeMemNode* minit_in = MergeMemNode::make(malloc);
init->set_req(InitializeNode::Memory, minit_in);
record_for_igvn(minit_in); // fold it up later, if possible
+ _gvn.set_type(minit_in, Type::MEMORY);
Node* minit_out = memory(rawidx);
assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
// Add an edge in the MergeMem for the header fields so an access
// to one of those has correct memory state
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
if (oop_type->isa_aryptr()) {
+ const TypeAryPtr* arytype = oop_type->is_aryptr();
+ if (arytype->klass()->is_value_array_klass()) {
+ ciValueArrayKlass* vak = arytype->klass()->as_value_array_klass();
+ ciValueKlass* vk = vak->element_klass()->as_value_klass();
+ for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
+ ciField* field = vk->nonstatic_field_at(i);
+ if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
+ continue; // do not bother to track really large numbers of fields
+ int off_in_vt = field->offset() - vk->first_field_offset();
+ const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
+ int fieldidx = C->get_alias_index(adr_type);
+ hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
+ }
+ } else {
const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
int elemidx = C->get_alias_index(telemref);
hook_memory_on_init(*this, elemidx, minit_in, minit_out);
+ }
} else if (oop_type->isa_instptr()) {
+ set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
ciField* field = ik->nonstatic_field_at(i);
if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
continue; // do not bother to track really large numbers of fields
@@ -3488,18 +3778,19 @@
// - If 'return_size_val', report the total object size to the caller.
// - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_instance(Node* klass_node,
Node* extra_slow_test,
Node* *return_size_val,
- bool deoptimize_on_exception) {
+ bool deoptimize_on_exception,
+ ValueTypeBaseNode* value_node) {
// Compute size in doublewords
// The size is always an integral number of doublewords, represented
// as a positive bytewise size stored in the klass's layout_helper.
// The layout_helper also encodes (in a low bit) the need for a slow path.
jint layout_con = Klass::_lh_neutral_value;
Node* layout_val = get_layout_helper(klass_node, layout_con);
- int layout_is_con = (layout_val == NULL);
+ bool layout_is_con = (layout_val == NULL);
if (extra_slow_test == NULL) extra_slow_test = intcon(0);
// Generate the initial go-slow test. It's either ALWAYS (return a
// Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
// case) a computed value derived from the layout_helper.
@@ -3546,34 +3837,42 @@
const TypeOopPtr* oop_type = tklass->as_instance_type();
// Now generate allocation code
// The entire memory state is needed for slow path of the allocation
- // since GC and deoptimization can happened.
+ // since GC and deoptimization can happen.
Node *mem = reset_memory();
set_all_memory(mem); // Create new memory state
AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
control(), mem, i_o(),
size, klass_node,
- initial_slow_test);
+ initial_slow_test, value_node);
return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
}
+// With compressed oops, the 64 bit init value for non flattened value
+// arrays is built from 2 32 bit compressed oops: the narrow-oop bits of
+// 'default_value' (already an EncodeP result at both call sites) are
+// duplicated into the upper and lower halves of one 64 bit word.
+static Node* raw_default_for_coops(Node* default_value, GraphKit& kit) {
+ Node* narrow_bits = kit.gvn().transform(new CastP2XNode(kit.control(), default_value));
+ Node* high_half = kit.gvn().transform(new LShiftLNode(narrow_bits, kit.intcon(32)));
+ return kit.gvn().transform(new OrLNode(narrow_bits, high_half));
+}
+
//-------------------------------new_array-------------------------------------
-// helper for both newarray and anewarray
+// helper for newarray and anewarray
// The 'length' parameter is (obviously) the length of the array.
// See comments on new_instance for the meaning of the other arguments.
Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
Node* length, // number of array elements
int nargs, // number of arguments to push back for uncommon trap
Node* *return_size_val,
bool deoptimize_on_exception) {
jint layout_con = Klass::_lh_neutral_value;
Node* layout_val = get_layout_helper(klass_node, layout_con);
- int layout_is_con = (layout_val == NULL);
+ bool layout_is_con = (layout_val == NULL);
if (!layout_is_con && !StressReflectiveCode &&
!too_many_traps(Deoptimization::Reason_class_check)) {
// This is a reflective array creation site.
// Optimistically assume that it is a subtype of Object[],
@@ -3599,11 +3898,11 @@
int fast_size_limit = FastAllocateSizeLimit;
if (layout_is_con) {
assert(!StressReflectiveCode, "stress mode does not use these paths");
// Increase the size limit if we have exact knowledge of array type.
int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
- fast_size_limit <<= (LogBytesPerLong - log2_esize);
+ fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
}
Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
@@ -3618,13 +3917,14 @@
// (T_BYTE has the weakest alignment and size restrictions...)
if (layout_is_con) {
int hsize = Klass::layout_helper_header_size(layout_con);
int eshift = Klass::layout_helper_log2_element_size(layout_con);
BasicType etype = Klass::layout_helper_element_type(layout_con);
+ bool is_value_array = Klass::layout_helper_is_valueArray(layout_con);
if ((round_mask & ~right_n_bits(eshift)) == 0)
round_mask = 0; // strength-reduce it if it goes away completely
- assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
+ assert(is_value_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
assert(header_size_min <= hsize, "generic minimum is smallest");
header_size_min = hsize;
header_size = intcon(hsize + round_mask);
} else {
Node* hss = intcon(Klass::_lh_header_size_shift);
@@ -3704,33 +4004,106 @@
}
// Now generate allocation code
// The entire memory state is needed for slow path of the allocation
- // since GC and deoptimization can happened.
+ // since GC and deoptimization can happen.
Node *mem = reset_memory();
set_all_memory(mem); // Create new memory state
if (initial_slow_test->is_Bool()) {
// Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
}
+ const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
+ const TypeAryPtr* ary_ptr = ary_type->isa_aryptr();
+ const Type* elem = NULL;
+ ciKlass* elem_klass = NULL;
+ if (ary_ptr != NULL) {
+ elem = ary_ptr->elem();
+ elem_klass = ary_ptr->klass()->as_array_klass()->element_klass();
+ }
+ Node* default_value = NULL;
+ Node* raw_default_value = NULL;
+ if (elem != NULL && elem->make_ptr()) {
+ if (elem_klass != NULL && elem_klass->is_valuetype()) {
+ ciValueKlass* vk = elem_klass->as_value_klass();
+ if (!vk->flatten_array()) {
+ default_value = ValueTypeNode::default_oop(gvn(), vk);
+ if (elem->isa_narrowoop()) {
+ default_value = _gvn.transform(new EncodePNode(default_value, elem));
+ raw_default_value = raw_default_for_coops(default_value, *this);
+ } else {
+ raw_default_value = _gvn.transform(new CastP2XNode(control(), default_value));
+ }
+ }
+ }
+ }
+
+ if (EnableValhalla && (elem == NULL || (elem_klass != NULL && elem_klass->is_java_lang_Object() && !ary_type->klass_is_exact()))) {
+ assert(raw_default_value == NULL, "shouldn't be set yet");
+
+ // unknown array type, could be a non flattened value array that's
+ // initialized to a non zero default value
+
+ Node* r = new RegionNode(4);
+ Node* phi = new PhiNode(r, TypeX_X);
+
+ Node* cmp = gen_lh_array_test(klass_node, Klass::_lh_array_tag_obj_value);
+ Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
+ IfNode* iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
+ r->init_req(1, _gvn.transform(new IfFalseNode(iff)));
+ phi->init_req(1, MakeConX(0));
+ set_control(_gvn.transform(new IfTrueNode(iff)));
+ Node* k_adr = basic_plus_adr(klass_node, in_bytes(ArrayKlass::element_klass_offset()));
+ Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, control(), immutable_memory(), k_adr, TypeInstPtr::KLASS));
+ cmp = gen_value_type_test(elem_klass);
+ bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
+ iff = create_and_map_if(control(), bol, PROB_FAIR, COUNT_UNKNOWN);
+ r->init_req(2, _gvn.transform(new IfTrueNode(iff)));
+ phi->init_req(2, MakeConX(0));
+ set_control(_gvn.transform(new IfFalseNode(iff)));
+
+ Node* adr_fixed_block_addr = basic_plus_adr(elem_klass, in_bytes(InstanceKlass::adr_valueklass_fixed_block_offset()));
+ Node* adr_fixed_block = make_load(control(), adr_fixed_block_addr, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
+
+ Node* default_value_offset_addr = basic_plus_adr(adr_fixed_block, in_bytes(ValueKlass::default_value_offset_offset()));
+ Node* default_value_offset = make_load(control(), default_value_offset_addr, TypeInt::INT, T_INT, MemNode::unordered);
+
+ Node* elem_mirror = load_mirror_from_klass(elem_klass);
+
+ Node* default_value_addr = basic_plus_adr(elem_mirror, ConvI2X(default_value_offset));
+ const TypePtr* adr_type = _gvn.type(default_value_addr)->is_ptr();
+ Node* val = access_load_at(elem_mirror, default_value_addr, adr_type, TypeInstPtr::BOTTOM, T_OBJECT, IN_HEAP);
+
+ if (UseCompressedOops) {
+ val = _gvn.transform(new EncodePNode(val, elem));
+ val = raw_default_for_coops(val, *this);
+ } else {
+ val = _gvn.transform(new CastP2XNode(control(), val));
+ }
+ r->init_req(3, control());
+ phi->init_req(3, val);
+ set_control(_gvn.transform(r));
+ raw_default_value = _gvn.transform(phi);
+ }
+
// Create the AllocateArrayNode and its result projections
AllocateArrayNode* alloc
= new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
control(), mem, i_o(),
size, klass_node,
initial_slow_test,
- length);
+ length, default_value,
+ raw_default_value);
// Cast to correct type. Note that the klass_node may be constant or not,
// and in the latter case the actual array type will be inexact also.
// (This happens via a non-constant argument to inline_native_newArray.)
// In any case, the value of klass_node provides the desired array type.
const TypeInt* length_type = _gvn.find_int_type(length);
- const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
if (ary_type->isa_aryptr() && length_type != NULL) {
// Try to get a better type than POS for the size
ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
}
@@ -3881,15 +4254,15 @@
}
// Load the backing byte[] from the 'value' field of java.lang.String 'str'.
// The load is made control dependent when 'set_ctrl' is true.
Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
int value_offset = java_lang_String::value_offset_in_bytes();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
- false, NULL, 0);
+ false, NULL, Type::Offset(0));
const TypePtr* value_field_type = string_type->add_offset(value_offset);
const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
TypeAry::make(TypeInt::BYTE, TypeInt::POS),
- ciTypeArrayKlass::make(T_BYTE), true, 0);
+ ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
Node* p = basic_plus_adr(str, str, value_offset);
Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
return load;
}
@@ -3898,11 +4271,11 @@
if (!CompactStrings) {
return intcon(java_lang_String::CODER_UTF16);
}
int coder_offset = java_lang_String::coder_offset_in_bytes();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
- false, NULL, 0);
+ false, NULL, Type::Offset(0));
const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
Node* p = basic_plus_adr(str, str, coder_offset);
Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
@@ -3910,21 +4283,21 @@
}
// Store byte[] 'value' into the 'value' field of java.lang.String 'str'
// (unordered, in-heap access through the barrier set).
void GraphKit::store_String_value(Node* str, Node* value) {
int value_offset = java_lang_String::value_offset_in_bytes();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
- false, NULL, 0);
+ false, NULL, Type::Offset(0));
const TypePtr* value_field_type = string_type->add_offset(value_offset);
access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
}
// Store byte 'value' into the 'coder' field of java.lang.String 'str'
// (unordered, in-heap access through the barrier set).
void GraphKit::store_String_coder(Node* str, Node* value) {
int coder_offset = java_lang_String::coder_offset_in_bytes();
const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
- false, NULL, 0);
+ false, NULL, Type::Offset(0));
const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
}
@@ -4033,9 +4406,24 @@
}
}
const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
/*is_unsigned_load=*/false);
if (con_type != NULL) {
- return makecon(con_type);
+ Node* con = makecon(con_type);
+ if (field->layout_type() == T_VALUETYPE && field->type()->as_value_klass()->is_scalarizable()) {
+ // Load value type from constant oop
+ assert(!con_type->maybe_null(), "should never be null");
+ con = ValueTypeNode::make_from_oop(this, con, field->type()->as_value_klass());
+ }
+ return con;
}
return NULL;
}
+
+//---------------------------load_mirror_from_klass----------------------------
+// Given a klass oop, load its java mirror (a java.lang.Class oop).
+Node* GraphKit::load_mirror_from_klass(Node* klass) {
+ // The mirror is reached indirectly: first load the OopHandle stored in
+ // the Klass, then resolve it with an IN_NATIVE access.
+ Node* handle_addr = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
+ Node* oop_handle = make_load(NULL, handle_addr, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
+ // mirror = ((OopHandle)mirror)->resolve();
+ return access_load(oop_handle, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
+}
< prev index next >