1664 }
1665
//-----------------------set_results_for_java_call-----------------------------
// Capture the results of the given out-of-line Java call: the return value
// (if the callee returns one) plus the exception, I/O and memory state.
// Returns top() if the call site is already dead (folded away).
Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
  if (stopped())  return top();  // maybe the call folded up?

  // Capture the return value, if any.
  Node* ret;
  if (call->method() == NULL ||
      call->method()->return_type()->basic_type() == T_VOID)
    ret = top();
  else {
    if (!call->tf()->returns_value_type_as_fields()) {
      // Single return value: a projection of the call's Parms slot.
      ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
    } else {
      // Return of multiple values (value type fields): we create a
      // ValueType node, each field is a projection from the call.
      const TypeTuple *range_sig = call->tf()->range_sig();
      const Type* t = range_sig->field_at(TypeFunc::Parms);
      assert(t->isa_valuetypeptr(), "only value types for multiple return values");
      ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
      // Field projections start at Parms+1, just past the return slot.
      ret = C->create_vt_node(call, vk, vk, 0, TypeFunc::Parms+1, false);
    }
  }

  // Note: Since any out-of-line call can produce an exception,
  // we always insert an I_O projection from the call into the result.

  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);

  if (separate_io_proj) {
    // The caller requested separate projections be used by the fall
    // through and exceptional paths, so replace the projections for
    // the fall through path.
    set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
    set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
  }
  return ret;
}
1702
1703 //--------------------set_predefined_input_for_runtime_call--------------------
1704 // Reading and setting the memory state is way conservative here.
3336 //---------------------------new_instance--------------------------------------
3337 // This routine takes a klass_node which may be constant (for a static type)
3338 // or may be non-constant (for reflective code). It will work equally well
3339 // for either, and the graph will fold nicely if the optimizer later reduces
3340 // the type to a constant.
3341 // The optional arguments are for specialized use by intrinsics:
// - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
// - If 'return_size_val' is not null, report the total object size to the caller.
3344 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3345 Node* GraphKit::new_instance(Node* klass_node,
3346 Node* extra_slow_test,
3347 Node* *return_size_val,
3348 bool deoptimize_on_exception,
3349 ValueTypeNode* value_node) {
3350 // Compute size in doublewords
3351 // The size is always an integral number of doublewords, represented
3352 // as a positive bytewise size stored in the klass's layout_helper.
3353 // The layout_helper also encodes (in a low bit) the need for a slow path.
3354 jint layout_con = Klass::_lh_neutral_value;
3355 Node* layout_val = get_layout_helper(klass_node, layout_con);
3356 int layout_is_con = (layout_val == NULL);
3357
3358 if (extra_slow_test == NULL) extra_slow_test = intcon(0);
3359 // Generate the initial go-slow test. It's either ALWAYS (return a
3360 // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3361 // case) a computed value derived from the layout_helper.
3362 Node* initial_slow_test = NULL;
3363 if (layout_is_con) {
3364 assert(!StressReflectiveCode, "stress mode does not use these paths");
3365 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3366 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3367 } else { // reflective case
3368 // This reflective path is used by Unsafe.allocateInstance.
3369 // (It may be stress-tested by specifying StressReflectiveCode.)
3370 // Basically, we want to get into the VM is there's an illegal argument.
3371 Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
3372 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
3373 if (extra_slow_test != intcon(0)) {
3374 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
3375 }
3376 // (Macro-expander will further convert this to a Bool, if necessary.)
3410
3411 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3412 control(), mem, i_o(),
3413 size, klass_node,
3414 initial_slow_test, value_node);
3415
3416 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3417 }
3418
3419 //-------------------------------new_array-------------------------------------
3420 // helper for newarray and anewarray
3421 // The 'length' parameter is (obviously) the length of the array.
3422 // See comments on new_instance for the meaning of the other arguments.
3423 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
3424 Node* length, // number of array elements
3425 int nargs, // number of arguments to push back for uncommon trap
3426 Node* *return_size_val,
3427 bool deoptimize_on_exception) {
3428 jint layout_con = Klass::_lh_neutral_value;
3429 Node* layout_val = get_layout_helper(klass_node, layout_con);
3430 int layout_is_con = (layout_val == NULL);
3431
3432 if (!layout_is_con && !StressReflectiveCode &&
3433 !too_many_traps(Deoptimization::Reason_class_check)) {
3434 // This is a reflective array creation site.
3435 // Optimistically assume that it is a subtype of Object[],
3436 // so that we can fold up all the address arithmetic.
3437 layout_con = Klass::array_layout_helper(T_OBJECT);
3438 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3439 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3440 { BuildCutout unless(this, bol_lh, PROB_MAX);
3441 inc_sp(nargs);
3442 uncommon_trap(Deoptimization::Reason_class_check,
3443 Deoptimization::Action_maybe_recompile);
3444 }
3445 layout_val = NULL;
3446 layout_is_con = true;
3447 }
3448
3449 // Generate the initial go-slow test. Make sure we do not overflow
3450 // if length is huge (near 2Gig) or negative! We do not need
|
1664 }
1665
1666 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) {
1667 if (stopped()) return top(); // maybe the call folded up?
1668
1669 // Capture the return value, if any.
1670 Node* ret;
1671 if (call->method() == NULL ||
1672 call->method()->return_type()->basic_type() == T_VOID)
1673 ret = top();
1674 else {
1675 if (!call->tf()->returns_value_type_as_fields()) {
1676 ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
1677 } else {
1678 // Return of multiple values (value type fields): we create a
1679 // ValueType node, each field is a projection from the call.
1680 const TypeTuple *range_sig = call->tf()->range_sig();
1681 const Type* t = range_sig->field_at(TypeFunc::Parms);
1682 assert(t->isa_valuetypeptr(), "only value types for multiple return values");
1683 ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
1684 ret = ValueTypeNode::make(_gvn, call, vk, TypeFunc::Parms+1, false);
1685 }
1686 }
1687
1688 // Note: Since any out-of-line call can produce an exception,
1689 // we always insert an I_O projection from the call into the result.
1690
1691 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj);
1692
1693 if (separate_io_proj) {
1694 // The caller requested separate projections be used by the fall
1695 // through and exceptional paths, so replace the projections for
1696 // the fall through path.
1697 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1698 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1699 }
1700 return ret;
1701 }
1702
1703 //--------------------set_predefined_input_for_runtime_call--------------------
1704 // Reading and setting the memory state is way conservative here.
3336 //---------------------------new_instance--------------------------------------
3337 // This routine takes a klass_node which may be constant (for a static type)
3338 // or may be non-constant (for reflective code). It will work equally well
3339 // for either, and the graph will fold nicely if the optimizer later reduces
3340 // the type to a constant.
3341 // The optional arguments are for specialized use by intrinsics:
// - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
// - If 'return_size_val' is not null, report the total object size to the caller.
3344 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
// Emit the IR for allocating a new instance of the class described by
// klass_node.  See the header comment above for the optional arguments.
Node* GraphKit::new_instance(Node* klass_node,
                             Node* extra_slow_test,
                             Node* *return_size_val,
                             bool deoptimize_on_exception,
                             ValueTypeNode* value_node) {
  // Compute size in doublewords
  // The size is always an integral number of doublewords, represented
  // as a positive bytewise size stored in the klass's layout_helper.
  // The layout_helper also encodes (in a low bit) the need for a slow path.
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  // A NULL layout_val means the layout helper is a known constant.
  bool  layout_is_con = (layout_val == NULL);

  if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
  // Generate the initial go-slow test.  It's either ALWAYS (return a
  // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
  // case) a computed value derived from the layout_helper.
  Node* initial_slow_test = NULL;
  if (layout_is_con) {
    assert(!StressReflectiveCode, "stress mode does not use these paths");
    bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
    initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
  } else {   // reflective case
    // This reflective path is used by Unsafe.allocateInstance.
    // (It may be stress-tested by specifying StressReflectiveCode.)
    // Basically, we want to get into the VM if there's an illegal argument.
    Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
    initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
    if (extra_slow_test != intcon(0)) {
      initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
    }
    // (Macro-expander will further convert this to a Bool, if necessary.)

  // NOTE(review): lines elided from this view compute 'mem', 'size' and
  // 'oop_type' and close the reflective-case block above.

  AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                         control(), mem, i_o(),
                                         size, klass_node,
                                         initial_slow_test, value_node);

  return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
}
3417 }
3418
3419 //-------------------------------new_array-------------------------------------
3420 // helper for newarray and anewarray
3421 // The 'length' parameter is (obviously) the length of the array.
3422 // See comments on new_instance for the meaning of the other arguments.
3423 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
3424 Node* length, // number of array elements
3425 int nargs, // number of arguments to push back for uncommon trap
3426 Node* *return_size_val,
3427 bool deoptimize_on_exception) {
3428 jint layout_con = Klass::_lh_neutral_value;
3429 Node* layout_val = get_layout_helper(klass_node, layout_con);
3430 bool layout_is_con = (layout_val == NULL);
3431
3432 if (!layout_is_con && !StressReflectiveCode &&
3433 !too_many_traps(Deoptimization::Reason_class_check)) {
3434 // This is a reflective array creation site.
3435 // Optimistically assume that it is a subtype of Object[],
3436 // so that we can fold up all the address arithmetic.
3437 layout_con = Klass::array_layout_helper(T_OBJECT);
3438 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3439 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3440 { BuildCutout unless(this, bol_lh, PROB_MAX);
3441 inc_sp(nargs);
3442 uncommon_trap(Deoptimization::Reason_class_check,
3443 Deoptimization::Action_maybe_recompile);
3444 }
3445 layout_val = NULL;
3446 layout_is_con = true;
3447 }
3448
3449 // Generate the initial go-slow test. Make sure we do not overflow
3450 // if length is huge (near 2Gig) or negative! We do not need
|