140 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
141 }
142 break;
143 }
144 default: ShouldNotReachHere();
145 }
146 return _gvn.transform(l);
147 }
148
149 // Helper routine to prevent the interpreter from handing
150 // unexpected typestate to an OSR method.
151 // The Node l is a value newly dug out of the interpreter frame.
152 // The type is the type predicted by ciTypeFlow. Note that it is
153 // not a general type, but can only come from Type::get_typeflow_type.
154 // The safepoint is a map which will feed an uncommon trap.
155 // If the value cannot match the predicted type, the mismatching
156 // control path is added to bad_type_exit and the value is replaced
157 // by null.
155 Node* Parse::check_interpreter_type(Node* l, const Type* type,
156 SafePointNode* &bad_type_exit) {
157 const TypeOopPtr* tp = type->isa_oopptr();
158 if (type->isa_valuetype() != NULL) {
159 // The interpreter passes value types as oops
160 tp = TypeOopPtr::make_from_klass(type->isa_valuetype()->value_klass());
161 tp = tp->join_speculative(TypePtr::NOTNULL)->is_oopptr();
162 }
163
164 // TypeFlow may assert null-ness if a type appears unloaded.
165 if (type == TypePtr::NULL_PTR ||
166 (tp != NULL && !tp->klass()->is_loaded())) {
167 // Value must be null, not a real oop.
168 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
169 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
170 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
171 set_control(_gvn.transform( new IfTrueNode(iff) ));
172 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
173 bad_type_exit->control()->add_req(bad_type);
174 l = null();
175 }
176
177 // Typeflow can also cut off paths from the CFG, based on
178 // types which appear unloaded, or call sites which appear unlinked.
179 // When paths are cut off, values at later merge points can rise
180 // toward more specific classes. Make sure these specific classes
910 kit.reset_memory(),
911 kit.frameptr(),
912 kit.returnadr());
913 // Add zero or 1 return values
914 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
915 if (ret_size > 0) {
916 kit.inc_sp(-ret_size); // pop the return value(s)
917 kit.sync_jvms();
918 Node* res = kit.argument(0);
919 if (tf()->returns_value_type_as_fields()) {
920 // Multiple return values (value type fields): add as many edges
921 // to the Return node as returned values.
922 assert(res->is_ValueType(), "what else supports multi value return?");
923 ValueTypeNode* vt = res->as_ValueType();
924 ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
925 if (vt->is_allocated(&kit.gvn()) && !StressValueTypeReturnedAsFields) {
926 ret->init_req(TypeFunc::Parms, vt->get_oop());
927 } else {
928 ret->init_req(TypeFunc::Parms, vt->tagged_klass(kit.gvn()));
929 }
930 const Array<SigEntry>* sig_array = vt->type()->is_valuetype()->value_klass()->extended_sig();
931 GrowableArray<SigEntry> sig = GrowableArray<SigEntry>(sig_array->length());
932 sig.appendAll(sig_array);
933 ExtendedSignature sig_cc = ExtendedSignature(&sig, SigEntryFilter());
934 uint idx = TypeFunc::Parms+1;
935 vt->pass_fields(&kit, ret, sig_cc, idx);
936 } else {
937 ret->add_req(res);
938 // Note: The second dummy edge is not needed by a ReturnNode.
939 }
940 }
941 // bind it to root
942 root()->add_req(ret);
943 record_for_igvn(ret);
944 initial_gvn()->transform_no_reclaim(ret);
945 }
946
947 //------------------------rethrow_exceptions-----------------------------------
948 // Bind all exception states in the list into a single RethrowNode.
949 void Compile::rethrow_exceptions(JVMState* jvms) {
950 GraphKit kit(jvms);
2310
2311 // Do not set_parse_bci, so that return goo is credited to the return insn.
2312 // vreturn can trigger an allocation so vreturn can throw. Setting
2313 // the bci here breaks exception handling. Commenting this out
2314 // doesn't seem to break anything.
2315 // set_bci(InvocationEntryBci);
2316 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2317 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2318 }
2319 if (C->env()->dtrace_method_probes()) {
2320 make_dtrace_method_exit(method());
2321 }
2322 // frame pointer is always same, already captured
2323 if (value != NULL) {
2324 Node* phi = _exits.argument(0);
2325 const Type* return_type = phi->bottom_type();
2326 const TypeOopPtr* tr = return_type->isa_oopptr();
2327 if (return_type->isa_valuetype()) {
2328 // Value type is returned as fields, make sure it is scalarized
2329 if (!value->is_ValueType()) {
2330 value = ValueTypeNode::make_from_oop(this, value, return_type->is_valuetype()->value_klass());
2331 }
2332 if (!_caller->has_method()) {
2333 // Value type is returned as fields from root method, make
2334 // sure all non-flattened value type fields are allocated.
2335 assert(tf()->returns_value_type_as_fields(), "must be returned as fields");
2336 value = value->as_ValueType()->allocate_fields(this);
2337 }
2338 } else if (value->is_ValueType()) {
2339 // Value type is returned as oop, make sure it is allocated
2340 assert(tr && tr->can_be_value_type(), "must return a value type pointer");
2341 value = ValueTypePtrNode::make_from_value_type(this, value->as_ValueType());
2342 } else if (tr && tr->isa_instptr() && tr->klass()->is_loaded() && tr->klass()->is_interface()) {
2343 // If returning oops to an interface-return, there is a silent free
2344 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2345 const TypeInstPtr* tp = value->bottom_type()->isa_instptr();
2346 if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) {
2347 // sharpen the type eagerly; this eases certain assert checking
2348 if (tp->higher_equal(TypeInstPtr::NOTNULL)) {
2349 tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
2350 }
140 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
141 }
142 break;
143 }
144 default: ShouldNotReachHere();
145 }
146 return _gvn.transform(l);
147 }
148
149 // Helper routine to prevent the interpreter from handing
150 // unexpected typestate to an OSR method.
151 // The Node l is a value newly dug out of the interpreter frame.
152 // The type is the type predicted by ciTypeFlow. Note that it is
153 // not a general type, but can only come from Type::get_typeflow_type.
154 // The safepoint is a map which will feed an uncommon trap.
155 // If the value cannot match the predicted type, the mismatching
156 // control path is added to bad_type_exit and the value is replaced
157 // by null.
155 Node* Parse::check_interpreter_type(Node* l, const Type* type,
156 SafePointNode* &bad_type_exit) {
157 const TypeOopPtr* tp = type->isa_oopptr();
158 if (type->isa_valuetype() != NULL) {
159 // The interpreter passes value types as oops
160 tp = TypeOopPtr::make_from_klass(type->value_klass());
161 tp = tp->join_speculative(TypePtr::NOTNULL)->is_oopptr();
162 }
163
164 // TypeFlow may assert null-ness if a type appears unloaded.
165 if (type == TypePtr::NULL_PTR ||
166 (tp != NULL && !tp->klass()->is_loaded())) {
167 // Value must be null, not a real oop.
168 Node* chk = _gvn.transform( new CmpPNode(l, null()) );
169 Node* tst = _gvn.transform( new BoolNode(chk, BoolTest::eq) );
170 IfNode* iff = create_and_map_if(control(), tst, PROB_MAX, COUNT_UNKNOWN);
171 set_control(_gvn.transform( new IfTrueNode(iff) ));
172 Node* bad_type = _gvn.transform( new IfFalseNode(iff) );
173 bad_type_exit->control()->add_req(bad_type);
174 l = null();
175 }
176
177 // Typeflow can also cut off paths from the CFG, based on
178 // types which appear unloaded, or call sites which appear unlinked.
179 // When paths are cut off, values at later merge points can rise
180 // toward more specific classes. Make sure these specific classes
910 kit.reset_memory(),
911 kit.frameptr(),
912 kit.returnadr());
913 // Add zero or 1 return values
914 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
915 if (ret_size > 0) {
916 kit.inc_sp(-ret_size); // pop the return value(s)
917 kit.sync_jvms();
918 Node* res = kit.argument(0);
919 if (tf()->returns_value_type_as_fields()) {
920 // Multiple return values (value type fields): add as many edges
921 // to the Return node as returned values.
922 assert(res->is_ValueType(), "what else supports multi value return?");
923 ValueTypeNode* vt = res->as_ValueType();
924 ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
925 if (vt->is_allocated(&kit.gvn()) && !StressValueTypeReturnedAsFields) {
926 ret->init_req(TypeFunc::Parms, vt->get_oop());
927 } else {
928 ret->init_req(TypeFunc::Parms, vt->tagged_klass(kit.gvn()));
929 }
930 const Array<SigEntry>* sig_array = vt->type()->value_klass()->extended_sig();
931 GrowableArray<SigEntry> sig = GrowableArray<SigEntry>(sig_array->length());
932 sig.appendAll(sig_array);
933 ExtendedSignature sig_cc = ExtendedSignature(&sig, SigEntryFilter());
934 uint idx = TypeFunc::Parms+1;
935 vt->pass_fields(&kit, ret, sig_cc, idx);
936 } else {
937 ret->add_req(res);
938 // Note: The second dummy edge is not needed by a ReturnNode.
939 }
940 }
941 // bind it to root
942 root()->add_req(ret);
943 record_for_igvn(ret);
944 initial_gvn()->transform_no_reclaim(ret);
945 }
946
947 //------------------------rethrow_exceptions-----------------------------------
948 // Bind all exception states in the list into a single RethrowNode.
949 void Compile::rethrow_exceptions(JVMState* jvms) {
950 GraphKit kit(jvms);
2310
2311 // Do not set_parse_bci, so that return goo is credited to the return insn.
2312 // vreturn can trigger an allocation so vreturn can throw. Setting
2313 // the bci here breaks exception handling. Commenting this out
2314 // doesn't seem to break anything.
2315 // set_bci(InvocationEntryBci);
2316 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2317 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2318 }
2319 if (C->env()->dtrace_method_probes()) {
2320 make_dtrace_method_exit(method());
2321 }
2322 // frame pointer is always same, already captured
2323 if (value != NULL) {
2324 Node* phi = _exits.argument(0);
2325 const Type* return_type = phi->bottom_type();
2326 const TypeOopPtr* tr = return_type->isa_oopptr();
2327 if (return_type->isa_valuetype()) {
2328 // Value type is returned as fields, make sure it is scalarized
2329 if (!value->is_ValueType()) {
2330 value = ValueTypeNode::make_from_oop(this, value, return_type->value_klass());
2331 }
2332 if (!_caller->has_method()) {
2333 // Value type is returned as fields from root method, make
2334 // sure all non-flattened value type fields are allocated.
2335 assert(tf()->returns_value_type_as_fields(), "must be returned as fields");
2336 value = value->as_ValueType()->allocate_fields(this);
2337 }
2338 } else if (value->is_ValueType()) {
2339 // Value type is returned as oop, make sure it is allocated
2340 assert(tr && tr->can_be_value_type(), "must return a value type pointer");
2341 value = ValueTypePtrNode::make_from_value_type(this, value->as_ValueType());
2342 } else if (tr && tr->isa_instptr() && tr->klass()->is_loaded() && tr->klass()->is_interface()) {
2343 // If returning oops to an interface-return, there is a silent free
2344 // cast from oop to interface allowed by the Verifier. Make it explicit here.
2345 const TypeInstPtr* tp = value->bottom_type()->isa_instptr();
2346 if (tp && tp->klass()->is_loaded() && !tp->klass()->is_interface()) {
2347 // sharpen the type eagerly; this eases certain assert checking
2348 if (tp->higher_equal(TypeInstPtr::NOTNULL)) {
2349 tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
2350 }