1495 BasicType bt,
1496 bool use_precise,
1497 MemNode::MemOrd mo,
1498 bool mismatched) {
1499 // Transformation of a value which could be NULL pointer (CastPP #NULL)
1500 // could be delayed during Parse (for example, in adjust_map_after_if()).
1501 // Execute transformation here to avoid barrier generation in such case.
1502 if (_gvn.type(val) == TypePtr::NULL_PTR)
1503 val = _gvn.makecon(TypePtr::NULL_PTR);
1504
1505 set_control(ctl);
1506 if (stopped()) return top(); // Dead path ?
1507
1508 assert(bt == T_OBJECT || bt == T_VALUETYPE, "sanity");
1509 assert(val != NULL, "not dead path");
1510 uint adr_idx = C->get_alias_index(adr_type);
1511 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1512
1513 if (bt == T_VALUETYPE) {
1514 // Allocate value type and store oop
1515 val = val->as_ValueType()->store_to_memory(this);
1516 }
1517
1518 pre_barrier(true /* do_load */,
1519 control(), obj, adr, adr_idx, val, val_type,
1520 NULL /* pre_val */,
1521 bt);
1522
1523 Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
1524 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1525 return store;
1526 }
1527
1528 // Could be an array or object we don't know at compile time (unsafe ref.)
1529 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1530 Node* obj, // containing obj
1531 Node* adr, // actual address to store val at
1532 const TypePtr* adr_type,
1533 Node* val,
1534 BasicType bt,
1535 MemNode::MemOrd mo,
1593 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1594 return ld;
1595 }
1596
1597 //-------------------------set_arguments_for_java_call-------------------------
1598 // Arguments (pre-popped from the stack) are taken from the JVMS.
1599 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1600 // Add the call arguments:
1601 const TypeTuple* domain = call->tf()->domain_sig();
1602 uint nargs = domain->cnt();
1603 for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1604 Node* arg = argument(i-TypeFunc::Parms);
1605 if (ValueTypePassFieldsAsArgs) {
1606 if (arg->is_ValueType()) {
1607 ValueTypeNode* vt = arg->as_ValueType();
1608 if (domain->field_at(i)->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
1609 // We don't pass value type arguments by reference but instead
1610 // pass each field of the value type
1611 idx += vt->pass_fields(call, idx, *this);
1612 } else {
1613 arg = arg->as_ValueType()->store_to_memory(this);
1614 call->init_req(idx, arg);
1615 idx++;
1616 }
1617 // If a value type argument is passed as fields, attach the Method* to the call site
1618 // to be able to access the extended signature later via attached_method_before_pc().
1619 // For example, see CompiledMethod::preserve_callee_argument_oops().
1620 call->set_override_symbolic_info(true);
1621 } else {
1622 call->init_req(idx, arg);
1623 idx++;
1624 }
1625 } else {
1626 if (arg->is_ValueType()) {
1627 // Pass value type argument via oop to callee
1628 arg = arg->as_ValueType()->store_to_memory(this);
1629 }
1630 call->init_req(i, arg);
1631 }
1632 }
1633 }
1634
1635 //---------------------------set_edges_for_java_call---------------------------
1636 // Connect a newly created call into the current JVMS.
1637 // A return value node (if any) is returned from set_edges_for_java_call.
1638 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1639
1640 // Add the predefined inputs:
1641 call->init_req( TypeFunc::Control, control() );
1642 call->init_req( TypeFunc::I_O , i_o() );
1643 call->init_req( TypeFunc::Memory , reset_memory() );
1644 call->init_req( TypeFunc::FramePtr, frameptr() );
1645 call->init_req( TypeFunc::ReturnAdr, top() );
1646
1647 add_safepoint_edges(call, must_throw);
1648
3326 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3327 }
3328 }
3329 #endif //ASSERT
3330
3331 return javaoop;
3332 }
3333
3334 //---------------------------new_instance--------------------------------------
3335 // This routine takes a klass_node which may be constant (for a static type)
3336 // or may be non-constant (for reflective code). It will work equally well
3337 // for either, and the graph will fold nicely if the optimizer later reduces
3338 // the type to a constant.
3339 // The optional arguments are for specialized use by intrinsics:
3340 // - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3341 // - If 'return_size_val', report the total object size to the caller.
3342 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3343 Node* GraphKit::new_instance(Node* klass_node,
3344 Node* extra_slow_test,
3345 Node* *return_size_val,
3346 bool deoptimize_on_exception) {
3347 // Compute size in doublewords
3348 // The size is always an integral number of doublewords, represented
3349 // as a positive bytewise size stored in the klass's layout_helper.
3350 // The layout_helper also encodes (in a low bit) the need for a slow path.
3351 jint layout_con = Klass::_lh_neutral_value;
3352 Node* layout_val = get_layout_helper(klass_node, layout_con);
3353 int layout_is_con = (layout_val == NULL);
3354
3355 if (extra_slow_test == NULL) extra_slow_test = intcon(0);
3356 // Generate the initial go-slow test. It's either ALWAYS (return a
3357 // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3358 // case) a computed value derived from the layout_helper.
3359 Node* initial_slow_test = NULL;
3360 if (layout_is_con) {
3361 assert(!StressReflectiveCode, "stress mode does not use these paths");
3362 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3363 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3364 } else { // reflective case
3365 // This reflective path is used by Unsafe.allocateInstance.
3366 // (It may be stress-tested by specifying StressReflectiveCode.)
3391 (*return_size_val) = size;
3392 }
3393
3394 // This is a precise notnull oop of the klass.
3395 // (Actually, it need not be precise if this is a reflective allocation.)
3396 // It's what we cast the result to.
3397 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3398 if (!tklass) tklass = TypeKlassPtr::OBJECT;
3399 const TypeOopPtr* oop_type = tklass->as_instance_type();
3400
3401 // Now generate allocation code
3402
3403 // The entire memory state is needed for slow path of the allocation
3404 // since GC and deoptimization can happen.
3405 Node *mem = reset_memory();
3406 set_all_memory(mem); // Create new memory state
3407
3408 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3409 control(), mem, i_o(),
3410 size, klass_node,
3411 initial_slow_test);
3412
3413 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3414 }
3415
3416 //-------------------------------new_array-------------------------------------
3417 // helper for newarray and anewarray
3418 // The 'length' parameter is (obviously) the length of the array.
3419 // See comments on new_instance for the meaning of the other arguments.
3420 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
3421 Node* length, // number of array elements
3422 int nargs, // number of arguments to push back for uncommon trap
3423 Node* *return_size_val,
3424 bool deoptimize_on_exception) {
3425 jint layout_con = Klass::_lh_neutral_value;
3426 Node* layout_val = get_layout_helper(klass_node, layout_con);
3427 int layout_is_con = (layout_val == NULL);
3428
3429 if (!layout_is_con && !StressReflectiveCode &&
3430 !too_many_traps(Deoptimization::Reason_class_check)) {
3431 // This is a reflective array creation site.
3622 if (stopped()) {
3623 set_control(null_ctl); // Always zero
3624 return;
3625 }
3626
3627 // Prepare for merging control and IO
3628 RegionNode* res_ctl = new RegionNode(3);
3629 res_ctl->init_req(1, null_ctl);
3630 gvn().set_type(res_ctl, Type::CONTROL);
3631 record_for_igvn(res_ctl);
3632 Node* res_io = PhiNode::make(res_ctl, i_o(), Type::ABIO);
3633 gvn().set_type(res_io, Type::ABIO);
3634 record_for_igvn(res_io);
3635
3636 // TODO comment
3637 SafePointNode* loop_map = NULL;
3638 {
3639 PreserveJVMState pjvms(this);
3640 // Create default value type and store it to memory
3641 Node* oop = ValueTypeNode::make_default(gvn(), vk);
3642 oop = oop->as_ValueType()->store_to_memory(this);
3643
3644 length = SubI(length, intcon(1));
3645 add_predicate(nargs);
3646 RegionNode* loop = new RegionNode(3);
3647 loop->init_req(1, control());
3648 gvn().set_type(loop, Type::CONTROL);
3649 record_for_igvn(loop);
3650
3651 Node* index = new PhiNode(loop, TypeInt::INT);
3652 index->init_req(1, intcon(0));
3653 gvn().set_type(index, TypeInt::INT);
3654 record_for_igvn(index);
3655
3656 // TODO explain why we need to capture all memory
3657 PhiNode* mem = new PhiNode(loop, Type::MEMORY, TypePtr::BOTTOM);
3658 mem->init_req(1, reset_memory());
3659 gvn().set_type(mem, Type::MEMORY);
3660 record_for_igvn(mem);
3661 set_control(loop);
3662 set_all_memory(mem);
|
1495 BasicType bt,
1496 bool use_precise,
1497 MemNode::MemOrd mo,
1498 bool mismatched) {
1499 // Transformation of a value which could be NULL pointer (CastPP #NULL)
1500 // could be delayed during Parse (for example, in adjust_map_after_if()).
1501 // Execute transformation here to avoid barrier generation in such case.
1502 if (_gvn.type(val) == TypePtr::NULL_PTR)
1503 val = _gvn.makecon(TypePtr::NULL_PTR);
1504
1505 set_control(ctl);
1506 if (stopped()) return top(); // Dead path ?
1507
1508 assert(bt == T_OBJECT || bt == T_VALUETYPE, "sanity");
1509 assert(val != NULL, "not dead path");
1510 uint adr_idx = C->get_alias_index(adr_type);
1511 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
1512
1513 if (bt == T_VALUETYPE) {
1514 // Allocate value type and store oop
1515 val = val->as_ValueType()->allocate(this);
1516 }
1517
1518 pre_barrier(true /* do_load */,
1519 control(), obj, adr, adr_idx, val, val_type,
1520 NULL /* pre_val */,
1521 bt);
1522
1523 Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
1524 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
1525 return store;
1526 }
1527
1528 // Could be an array or object we don't know at compile time (unsafe ref.)
1529 Node* GraphKit::store_oop_to_unknown(Node* ctl,
1530 Node* obj, // containing obj
1531 Node* adr, // actual address to store val at
1532 const TypePtr* adr_type,
1533 Node* val,
1534 BasicType bt,
1535 MemNode::MemOrd mo,
1593 Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
1594 return ld;
1595 }
1596
1597 //-------------------------set_arguments_for_java_call-------------------------
1598 // Arguments (pre-popped from the stack) are taken from the JVMS.
1599 void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
1600 // Add the call arguments:
1601 const TypeTuple* domain = call->tf()->domain_sig();
1602 uint nargs = domain->cnt();
1603 for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1604 Node* arg = argument(i-TypeFunc::Parms);
1605 if (ValueTypePassFieldsAsArgs) {
1606 if (arg->is_ValueType()) {
1607 ValueTypeNode* vt = arg->as_ValueType();
1608 if (domain->field_at(i)->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
1609 // We don't pass value type arguments by reference but instead
1610 // pass each field of the value type
1611 idx += vt->pass_fields(call, idx, *this);
1612 } else {
1613 arg = arg->as_ValueType()->allocate(this);
1614 call->init_req(idx, arg);
1615 idx++;
1616 }
1617 // If a value type argument is passed as fields, attach the Method* to the call site
1618 // to be able to access the extended signature later via attached_method_before_pc().
1619 // For example, see CompiledMethod::preserve_callee_argument_oops().
1620 call->set_override_symbolic_info(true);
1621 } else {
1622 call->init_req(idx, arg);
1623 idx++;
1624 }
1625 } else {
1626 if (arg->is_ValueType()) {
1627 // Pass value type argument via oop to callee
1628 arg = arg->as_ValueType()->allocate(this);
1629 }
1630 call->init_req(i, arg);
1631 }
1632 }
1633 }
1634
1635 //---------------------------set_edges_for_java_call---------------------------
1636 // Connect a newly created call into the current JVMS.
1637 // A return value node (if any) is returned from set_edges_for_java_call.
1638 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1639
1640 // Add the predefined inputs:
1641 call->init_req( TypeFunc::Control, control() );
1642 call->init_req( TypeFunc::I_O , i_o() );
1643 call->init_req( TypeFunc::Memory , reset_memory() );
1644 call->init_req( TypeFunc::FramePtr, frameptr() );
1645 call->init_req( TypeFunc::ReturnAdr, top() );
1646
1647 add_safepoint_edges(call, must_throw);
1648
3326 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3327 }
3328 }
3329 #endif //ASSERT
3330
3331 return javaoop;
3332 }
3333
3334 //---------------------------new_instance--------------------------------------
3335 // This routine takes a klass_node which may be constant (for a static type)
3336 // or may be non-constant (for reflective code). It will work equally well
3337 // for either, and the graph will fold nicely if the optimizer later reduces
3338 // the type to a constant.
3339 // The optional arguments are for specialized use by intrinsics:
3340 // - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3341 // - If 'return_size_val', report the total object size to the caller.
3342 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3343 Node* GraphKit::new_instance(Node* klass_node,
3344 Node* extra_slow_test,
3345 Node* *return_size_val,
3346 bool deoptimize_on_exception,
3347 ValueTypeNode* value_node) {
3348 // Compute size in doublewords
3349 // The size is always an integral number of doublewords, represented
3350 // as a positive bytewise size stored in the klass's layout_helper.
3351 // The layout_helper also encodes (in a low bit) the need for a slow path.
3352 jint layout_con = Klass::_lh_neutral_value;
3353 Node* layout_val = get_layout_helper(klass_node, layout_con);
3354 int layout_is_con = (layout_val == NULL);
3355
3356 if (extra_slow_test == NULL) extra_slow_test = intcon(0);
3357 // Generate the initial go-slow test. It's either ALWAYS (return a
3358 // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
3359 // case) a computed value derived from the layout_helper.
3360 Node* initial_slow_test = NULL;
3361 if (layout_is_con) {
3362 assert(!StressReflectiveCode, "stress mode does not use these paths");
3363 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3364 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3365 } else { // reflective case
3366 // This reflective path is used by Unsafe.allocateInstance.
3367 // (It may be stress-tested by specifying StressReflectiveCode.)
3392 (*return_size_val) = size;
3393 }
3394
3395 // This is a precise notnull oop of the klass.
3396 // (Actually, it need not be precise if this is a reflective allocation.)
3397 // It's what we cast the result to.
3398 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3399 if (!tklass) tklass = TypeKlassPtr::OBJECT;
3400 const TypeOopPtr* oop_type = tklass->as_instance_type();
3401
3402 // Now generate allocation code
3403
3404 // The entire memory state is needed for slow path of the allocation
3405 // since GC and deoptimization can happen.
3406 Node *mem = reset_memory();
3407 set_all_memory(mem); // Create new memory state
3408
3409 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3410 control(), mem, i_o(),
3411 size, klass_node,
3412 initial_slow_test, value_node);
3413
3414 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3415 }
3416
3417 //-------------------------------new_array-------------------------------------
3418 // helper for newarray and anewarray
3419 // The 'length' parameter is (obviously) the length of the array.
3420 // See comments on new_instance for the meaning of the other arguments.
3421 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
3422 Node* length, // number of array elements
3423 int nargs, // number of arguments to push back for uncommon trap
3424 Node* *return_size_val,
3425 bool deoptimize_on_exception) {
3426 jint layout_con = Klass::_lh_neutral_value;
3427 Node* layout_val = get_layout_helper(klass_node, layout_con);
3428 int layout_is_con = (layout_val == NULL);
3429
3430 if (!layout_is_con && !StressReflectiveCode &&
3431 !too_many_traps(Deoptimization::Reason_class_check)) {
3432 // This is a reflective array creation site.
3623 if (stopped()) {
3624 set_control(null_ctl); // Always zero
3625 return;
3626 }
3627
3628 // Prepare for merging control and IO
3629 RegionNode* res_ctl = new RegionNode(3);
3630 res_ctl->init_req(1, null_ctl);
3631 gvn().set_type(res_ctl, Type::CONTROL);
3632 record_for_igvn(res_ctl);
3633 Node* res_io = PhiNode::make(res_ctl, i_o(), Type::ABIO);
3634 gvn().set_type(res_io, Type::ABIO);
3635 record_for_igvn(res_io);
3636
3637 // TODO comment
3638 SafePointNode* loop_map = NULL;
3639 {
3640 PreserveJVMState pjvms(this);
3641 // Create default value type and store it to memory
3642 Node* oop = ValueTypeNode::make_default(gvn(), vk);
3643 oop = oop->as_ValueType()->allocate(this);
3644
3645 length = SubI(length, intcon(1));
3646 add_predicate(nargs);
3647 RegionNode* loop = new RegionNode(3);
3648 loop->init_req(1, control());
3649 gvn().set_type(loop, Type::CONTROL);
3650 record_for_igvn(loop);
3651
3652 Node* index = new PhiNode(loop, TypeInt::INT);
3653 index->init_req(1, intcon(0));
3654 gvn().set_type(index, TypeInt::INT);
3655 record_for_igvn(index);
3656
3657 // TODO explain why we need to capture all memory
3658 PhiNode* mem = new PhiNode(loop, Type::MEMORY, TypePtr::BOTTOM);
3659 mem->init_req(1, reset_memory());
3660 gvn().set_type(mem, Type::MEMORY);
3661 record_for_igvn(mem);
3662 set_control(loop);
3663 set_all_memory(mem);
|