110 // Ptr types are mixed together with T_ADDRESS, but NULL is
111 // really for T_OBJECT types, so correct it.
112 bt = T_OBJECT;
113 }
114 Node *mem = memory(Compile::AliasIdxRaw);
115 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
116 Node *ctl = control();
117
118 // Very similar to LoadNode::make, except we handle unaligned longs and
119 // doubles on Sparc. Intel can handle them just fine directly.
120 Node *l = NULL;
121 switch (bt) { // Signature is flattened
122 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
123 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
124 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
125 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
126 case T_VALUETYPE: {
127 // Load oop and create a new ValueTypeNode
128 const TypeValueTypePtr* vtptr_type = TypeValueTypePtr::make(type->is_valuetype(), TypePtr::NotNull);
129 l = _gvn.transform(new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, vtptr_type, MemNode::unordered));
130 l = ValueTypeNode::make(gvn(), mem, l);
131 break;
132 }
133 case T_VALUETYPEPTR: {
134 l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeValueTypePtr::NOTNULL, MemNode::unordered);
135 break;
136 }
137 case T_LONG:
138 case T_DOUBLE: {
139 // Since arguments are in reverse order, the argument address 'adr'
140 // refers to the back half of the long/double. Recompute adr.
141 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
142 if (Matcher::misaligned_doubles_ok) {
143 l = (bt == T_DOUBLE)
144 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
145 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
146 } else {
147 l = (bt == T_DOUBLE)
148 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
149 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
150 }
188 // TypeFlow asserted a specific object type. Value must have that type.
189 Node* bad_type_ctrl = NULL;
190 l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
191 bad_type_exit->control()->add_req(bad_type_ctrl);
192 }
193
194 BasicType bt_l = _gvn.type(l)->basic_type();
195 BasicType bt_t = type->basic_type();
196 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
197 return l;
198 }
199
200 // Helper routine which sets up elements of the initial parser map when
201 // performing a parse for on stack replacement. Add values into map.
202 // The only parameter contains the address of the interpreter arguments.
203 void Parse::load_interpreter_state(Node* osr_buf) {
204 int index;
205 int max_locals = jvms()->loc_size();
206 int max_stack = jvms()->stk_size();
207
208
209 // Mismatch between method and jvms can occur since map briefly held
210 // an OSR entry state (which takes up one RawPtr word).
211 assert(max_locals == method()->max_locals(), "sanity");
212 assert(max_stack >= method()->max_stack(), "sanity");
213 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
214 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
215
216 // Find the start block.
217 Block* osr_block = start_block();
218 assert(osr_block->start() == osr_bci(), "sanity");
219
220 // Set initial BCI.
221 set_parse_bci(osr_block->start());
222
223 // Set initial stack depth.
224 set_sp(osr_block->start_sp());
225
226 // Check bailouts. We currently do not perform on stack replacement
227 // of loops in catch blocks or loops which branch with a non-empty stack.
228 if (sp() != 0) {
229 C->record_method_not_compilable("OSR starts with non-empty stack");
230 return;
231 }
232 // Do not OSR inside finally clauses:
233 if (osr_block->has_trap_at(osr_block->start())) {
234 C->record_method_not_compilable("OSR starts with an immediate trap");
235 return;
236 }
237
238 // Commute monitors from interpreter frame to compiler frame.
239 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
240 int mcnt = osr_block->flow()->monitor_count();
241 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
242 for (index = 0; index < mcnt; index++) {
243 // Make a BoxLockNode for the monitor.
244 Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
245
246
247 // Displaced headers and locked objects are interleaved in the
248 // temp OSR buffer. We only copy the locked objects out here.
249 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
250 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
251 // Try to copy the displaced header to the BoxNode
252 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
253
254
255 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
256
257 // Build a bogus FastLockNode (no code will be generated) and push the
258 // monitor into our debug info.
259 const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
260 map()->push_monitor(flock);
261
262 // If the lock is our method synchronization lock, tuck it away in
263 // _synch_lock for return and rethrow exit paths.
264 if (index == 0 && method()->is_synchronized()) {
265 _synch_lock = flock;
266 }
267 }
268
269 // Use the raw liveness computation to make sure that unexpected
270 // values don't propagate into the OSR frame.
271 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
272 if (!live_locals.is_valid()) {
273 // Degenerate or breakpointed method.
274 C->record_method_not_compilable("OSR in empty or breakpointed method");
791 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
792 if (ret_type->isa_int()) {
793 BasicType ret_bt = method()->return_type()->basic_type();
794 if (ret_bt == T_BOOLEAN ||
795 ret_bt == T_CHAR ||
796 ret_bt == T_BYTE ||
797 ret_bt == T_SHORT) {
798 ret_type = TypeInt::INT;
799 }
800 }
801
802 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
803 // becomes loaded during the subsequent parsing, the loaded and unloaded
804 // types will not join when we transform and push in do_exits().
805 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
806 if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
807 ret_type = TypeOopPtr::BOTTOM;
808 }
809 if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
810 ret_type->isa_valuetypeptr() &&
811 ret_type->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
812 // When inlining or when returning multiple values, return the
813 // value type as a ValueTypeNode rather than as an oop
814 ret_type = ret_type->is_valuetypeptr()->value_type();
815 }
816 int ret_size = type2size[ret_type->basic_type()];
817 Node* ret_phi = new PhiNode(region, ret_type);
818 gvn().set_type_bottom(ret_phi);
819 _exits.ensure_stack(ret_size);
820 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
821 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
822 _exits.set_argument(0, ret_phi); // here is where the parser finds it
823 // Note: ret_phi is not yet pushed, until do_exits.
824 }
825 }
826
827 //----------------------------build_start_state-------------------------------
828 // Construct a state which contains only the incoming arguments from an
829 // unknown caller. The method & bci will be NULL & InvocationEntryBci.
830 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
831 int arg_size_sig = tf->domain_sig()->cnt();
843 entry_nn->set_jvms(entry_jvms);
844 set_default_node_notes(entry_nn);
845 }
846 PhaseGVN& gvn = *initial_gvn();
847 uint j = 0;
848 for (uint i = 0; i < (uint)arg_size_sig; i++) {
849 assert(j >= i, "less actual arguments than in the signature?");
850 if (ValueTypePassFieldsAsArgs) {
851 if (i < TypeFunc::Parms) {
852 assert(i == j, "no change before the actual arguments");
853 Node* parm = gvn.transform(new ParmNode(start, i));
854 map->init_req(i, parm);
855 // Record all these guys for later GVN.
856 record_for_igvn(parm);
857 j++;
858 } else {
859 // Value type arguments are not passed by reference: we get an
860 // argument per field of the value type. Build ValueTypeNodes
861 // from the value type arguments.
862 const Type* t = tf->domain_sig()->field_at(i);
863 if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) {
864 ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
865 Node* vt = ValueTypeNode::make(gvn, start, vk, j, true);
866 map->init_req(i, gvn.transform(vt));
867 j += vk->value_arg_slots();
868 } else {
869 Node* parm = gvn.transform(new ParmNode(start, j));
870 map->init_req(i, parm);
871 // Record all these guys for later GVN.
872 record_for_igvn(parm);
873 j++;
874 }
875 }
876 } else {
877 Node* parm = gvn.transform(new ParmNode(start, i));
878 // Check if parameter is a value type pointer
879 if (gvn.type(parm)->isa_valuetypeptr()) {
880 // Create ValueTypeNode from the oop and replace the parameter
881 parm = ValueTypeNode::make(gvn, map->memory(), parm);
882 }
883 map->init_req(i, parm);
884 // Record all these guys for later GVN.
885 record_for_igvn(parm);
886 j++;
887 }
888 }
889 for (; j < map->req(); j++) {
890 map->init_req(j, top());
891 }
892 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
893 set_default_node_notes(old_nn);
894 map->set_jvms(jvms);
895 jvms->set_map(map);
896 return jvms;
897 }
898
899 //-----------------------------make_node_notes---------------------------------
900 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
901 if (caller_nn == NULL) return NULL;
914 GraphKit kit(jvms);
915 Node* ret = new ReturnNode(TypeFunc::Parms,
916 kit.control(),
917 kit.i_o(),
918 kit.reset_memory(),
919 kit.frameptr(),
920 kit.returnadr());
921 // Add zero or one return value
922 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
923 if (ret_size > 0) {
924 kit.inc_sp(-ret_size); // pop the return value(s)
925 kit.sync_jvms();
926 Node* res = kit.argument(0);
927 if (tf()->returns_value_type_as_fields()) {
928 // Multiple return values (value type fields): add as many edges
929 // to the Return node as there are returned values.
930 assert(res->is_ValueType(), "what else supports multi value return");
931 ValueTypeNode* vt = res->as_ValueType();
932 ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
933 vt->pass_klass(ret, TypeFunc::Parms, kit);
934 vt->pass_fields(ret, TypeFunc::Parms+1, kit);
935 } else {
936 ret->add_req(res);
937 // Note: The second dummy edge is not needed by a ReturnNode.
938 }
939 }
940 // bind it to root
941 root()->add_req(ret);
942 record_for_igvn(ret);
943 initial_gvn()->transform_no_reclaim(ret);
944 }
945
946 //------------------------rethrow_exceptions-----------------------------------
947 // Bind all exception states in the list into a single RethrowNode.
948 void Compile::rethrow_exceptions(JVMState* jvms) {
949 GraphKit kit(jvms);
950 if (!kit.has_exceptions()) return; // nothing to generate
951 // Load my combined exception state into the kit, with all phis transformed:
952 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
953 Node* ex_oop = kit.use_exception_state(ex_map);
954 RethrowNode* exit = new RethrowNode(kit.control(),
2255 // Set starting bci for uncommon trap.
2256 set_parse_bci(0);
2257
2258 const TypePtr* adr_type = TypeRawPtr::make((address)mc);
2259 Node* mc_adr = makecon(adr_type);
2260 Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset()));
2261 Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2262 Node* decr = _gvn.transform(new SubINode(cnt, makecon(TypeInt::ONE)));
2263 store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
2264 Node *chk = _gvn.transform(new CmpINode(decr, makecon(TypeInt::ZERO)));
2265 Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::gt));
2266 { BuildCutout unless(this, tst, PROB_ALWAYS);
2267 uncommon_trap(Deoptimization::Reason_tenured,
2268 Deoptimization::Action_make_not_entrant);
2269 }
2270 }
2271
2272 //------------------------------return_current---------------------------------
2273 // Append current _map to _exit_return
2274 void Parse::return_current(Node* value) {
2275 if (value != NULL && value->is_ValueType() && !_caller->has_method() &&
2276 !tf()->returns_value_type_as_fields()) {
2277 // When returning from the root JVMState without multiple return values,
2278 // make sure the value type is allocated
2279 value = value->as_ValueType()->allocate(this);
2280 }
2281
2282 if (RegisterFinalizersAtInit &&
2283 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2284 call_register_finalizer();
2285 }
2286
2287 // Do not set_parse_bci, so that return goo is credited to the return insn.
2288 // vreturn can trigger an allocation so vreturn can throw. Setting
2289 // the bci here breaks exception handling. Commenting this out
2290 // doesn't seem to break anything.
2291 // set_bci(InvocationEntryBci);
2292 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2293 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2294 }
2295 if (C->env()->dtrace_method_probes()) {
2296 make_dtrace_method_exit(method());
2297 }
2298 SafePointNode* exit_return = _exits.map();
2299 exit_return->in( TypeFunc::Control )->add_req( control() );
|
110 // Ptr types are mixed together with T_ADDRESS, but NULL is
111 // really for T_OBJECT types, so correct it.
112 bt = T_OBJECT;
113 }
114 Node *mem = memory(Compile::AliasIdxRaw);
115 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
116 Node *ctl = control();
117
118 // Very similar to LoadNode::make, except we handle unaligned longs and
119 // doubles on Sparc. Intel can handle them just fine directly.
120 Node *l = NULL;
121 switch (bt) { // Signature is flattened
122 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
123 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
124 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
125 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
126 case T_VALUETYPE: {
127 // Load oop and create a new ValueTypeNode
128 const TypeValueTypePtr* vtptr_type = TypeValueTypePtr::make(type->is_valuetype(), TypePtr::NotNull);
129 l = _gvn.transform(new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, vtptr_type, MemNode::unordered));
130 l = ValueTypeNode::make(this, l);
131 break;
132 }
133 case T_VALUETYPEPTR: {
134 l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeValueTypePtr::NOTNULL, MemNode::unordered);
135 break;
136 }
137 case T_LONG:
138 case T_DOUBLE: {
139 // Since arguments are in reverse order, the argument address 'adr'
140 // refers to the back half of the long/double. Recompute adr.
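// The value occupies the two words at -(index+1)*wordSize and -index*wordSize;
// 'adr' pointed at the latter (the back half), so the full-width load below
// must be issued one word lower.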
141 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
142 if (Matcher::misaligned_doubles_ok) {
143 l = (bt == T_DOUBLE)
144 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
145 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
146 } else {
147 l = (bt == T_DOUBLE)
148 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
149 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
150 }
188 // TypeFlow asserted a specific object type. Value must have that type.
189 Node* bad_type_ctrl = NULL;
190 l = gen_checkcast(l, makecon(TypeKlassPtr::make(tp->klass())), &bad_type_ctrl);
191 bad_type_exit->control()->add_req(bad_type_ctrl);
192 }
193
194 BasicType bt_l = _gvn.type(l)->basic_type();
195 BasicType bt_t = type->basic_type();
196 assert(_gvn.type(l)->higher_equal(type), "must constrain OSR typestate");
197 return l;
198 }
199
200 // Helper routine which sets up elements of the initial parser map when
201 // performing a parse for on stack replacement. Add values into map.
202 // The only parameter contains the address of the interpreter arguments.
203 void Parse::load_interpreter_state(Node* osr_buf) {
204 int index;
205 int max_locals = jvms()->loc_size();
206 int max_stack = jvms()->stk_size();
207
208 // Mismatch between method and jvms can occur since map briefly held
209 // an OSR entry state (which takes up one RawPtr word).
210 assert(max_locals == method()->max_locals(), "sanity");
211 assert(max_stack >= method()->max_stack(), "sanity");
212 assert((int)jvms()->endoff() == TypeFunc::Parms + max_locals + max_stack, "sanity");
213 assert((int)jvms()->endoff() == (int)map()->req(), "sanity");
214
215 // Find the start block.
216 Block* osr_block = start_block();
217 assert(osr_block->start() == osr_bci(), "sanity");
218
219 // Set initial BCI.
220 set_parse_bci(osr_block->start());
221
222 // Set initial stack depth.
223 set_sp(osr_block->start_sp());
224
225 // Check bailouts. We currently do not perform on stack replacement
226 // of loops in catch blocks or loops which branch with a non-empty stack.
227 if (sp() != 0) {
228 C->record_method_not_compilable("OSR starts with non-empty stack");
229 return;
230 }
231 // Do not OSR inside finally clauses:
232 if (osr_block->has_trap_at(osr_block->start())) {
233 C->record_method_not_compilable("OSR starts with an immediate trap");
234 return;
235 }
236
237 // Commute monitors from interpreter frame to compiler frame.
238 assert(jvms()->monitor_depth() == 0, "should be no active locks at beginning of osr");
239 int mcnt = osr_block->flow()->monitor_count();
240 Node *monitors_addr = basic_plus_adr(osr_buf, osr_buf, (max_locals+mcnt*2-1)*wordSize);
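// Layout implied by the arithmetic above: max_locals local slots followed by
// mcnt two-word monitor entries. monitors_addr points at the last word of
// that area, and fetch_interpreter_state() indexes backwards from it.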
241 for (index = 0; index < mcnt; index++) {
242 // Make a BoxLockNode for the monitor.
243 Node *box = _gvn.transform(new BoxLockNode(next_monitor()));
244
245 // Displaced headers and locked objects are interleaved in the
246 // temp OSR buffer. We only copy the locked objects out here.
247 // Fetch the locked object from the OSR temp buffer and copy to our fastlock node.
248 Node* lock_object = fetch_interpreter_state(index*2, Type::get_const_basic_type(T_OBJECT), monitors_addr, osr_buf);
249 // Try to copy the displaced header to the BoxNode
250 Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf);
251
252 store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
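// The store above puts the displaced mark word into the stack slot that the
// BoxLockNode stands for, where the unlocking code expects to find it.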
253
254 // Build a bogus FastLockNode (no code will be generated) and push the
255 // monitor into our debug info.
256 const FastLockNode *flock = _gvn.transform(new FastLockNode( 0, lock_object, box ))->as_FastLock();
257 map()->push_monitor(flock);
258
259 // If the lock is our method synchronization lock, tuck it away in
260 // _synch_lock for return and rethrow exit paths.
261 if (index == 0 && method()->is_synchronized()) {
262 _synch_lock = flock;
263 }
264 }
265
266 // Use the raw liveness computation to make sure that unexpected
267 // values don't propagate into the OSR frame.
268 MethodLivenessResult live_locals = method()->liveness_at_bci(osr_bci());
269 if (!live_locals.is_valid()) {
270 // Degenerate or breakpointed method.
271 C->record_method_not_compilable("OSR in empty or breakpointed method");
788 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
789 if (ret_type->isa_int()) {
790 BasicType ret_bt = method()->return_type()->basic_type();
791 if (ret_bt == T_BOOLEAN ||
792 ret_bt == T_CHAR ||
793 ret_bt == T_BYTE ||
794 ret_bt == T_SHORT) {
795 ret_type = TypeInt::INT;
796 }
797 }
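// Rationale (inferred, not stated in the source): a callee may return a
// sub-word value that is not canonicalized to the declared range, so the
// return phi is typed as a full int rather than as the narrow subtype.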
798
799 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
800 // becomes loaded during the subsequent parsing, the loaded and unloaded
801 // types will not join when we transform and push in do_exits().
802 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
803 if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
804 ret_type = TypeOopPtr::BOTTOM;
805 }
806 if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
807 ret_type->isa_valuetypeptr() &&
808 !ret_type->is_valuetypeptr()->is__Value()) {
809 // When inlining or when returning multiple values, return the
810 // value type as a ValueTypeNode rather than as an oop
811 ret_type = ret_type->is_valuetypeptr()->value_type();
812 }
813 int ret_size = type2size[ret_type->basic_type()];
814 Node* ret_phi = new PhiNode(region, ret_type);
815 gvn().set_type_bottom(ret_phi);
816 _exits.ensure_stack(ret_size);
817 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
818 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
819 _exits.set_argument(0, ret_phi); // here is where the parser finds it
820 // Note: ret_phi is not yet pushed, until do_exits.
821 }
822 }
823
824 //----------------------------build_start_state-------------------------------
825 // Construct a state which contains only the incoming arguments from an
826 // unknown caller. The method & bci will be NULL & InvocationEntryBci.
827 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
828 int arg_size_sig = tf->domain_sig()->cnt();
840 entry_nn->set_jvms(entry_jvms);
841 set_default_node_notes(entry_nn);
842 }
843 PhaseGVN& gvn = *initial_gvn();
844 uint j = 0;
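// 'i' walks the flattened signature; 'j' walks the incoming argument slots.
// j can run ahead of i because a value type passed as fields consumes
// value_arg_slots() incoming slots.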
845 for (uint i = 0; i < (uint)arg_size_sig; i++) {
846 assert(j >= i, "less actual arguments than in the signature?");
847 if (ValueTypePassFieldsAsArgs) {
848 if (i < TypeFunc::Parms) {
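// The first TypeFunc::Parms inputs are the fixed edges (control, i/o, memory,
// frame pointer, return address); they pass through unchanged.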
849 assert(i == j, "no change before the actual arguments");
850 Node* parm = gvn.transform(new ParmNode(start, i));
851 map->init_req(i, parm);
852 // Record all these guys for later GVN.
853 record_for_igvn(parm);
854 j++;
855 } else {
856 // Value type arguments are not passed by reference: we get an
857 // argument per field of the value type. Build ValueTypeNodes
858 // from the value type arguments.
859 const Type* t = tf->domain_sig()->field_at(i);
860 if (t->isa_valuetypeptr() && !t->is_valuetypeptr()->is__Value()) {
861 ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass();
862 Node* ctl = map->control();
863 Node* vt = ValueTypeNode::make(gvn, ctl, map->memory(), start, vk, j, true);
864 map->set_control(ctl);
865 map->init_req(i, gvn.transform(vt));
866 j += vk->value_arg_slots();
867 } else {
868 Node* parm = gvn.transform(new ParmNode(start, j));
869 map->init_req(i, parm);
870 // Record all these guys for later GVN.
871 record_for_igvn(parm);
872 j++;
873 }
874 }
875 } else {
876 Node* parm = gvn.transform(new ParmNode(start, i));
877 // Check if parameter is a value type pointer
878 if (gvn.type(parm)->isa_valuetypeptr()) {
879 // Create ValueTypeNode from the oop and replace the parameter
880 Node* ctl = map->control();
881 parm = ValueTypeNode::make(gvn, ctl, map->memory(), parm);
882 map->set_control(ctl);
883 }
884 map->init_req(i, parm);
885 // Record all these guys for later GVN.
886 record_for_igvn(parm);
887 j++;
888 }
889 }
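// Any remaining map slots (locals and stack beyond the incoming arguments)
// start out dead, i.e. top().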
890 for (; j < map->req(); j++) {
891 map->init_req(j, top());
892 }
893 assert(jvms->argoff() == TypeFunc::Parms, "parser gets arguments here");
894 set_default_node_notes(old_nn);
895 map->set_jvms(jvms);
896 jvms->set_map(map);
897 return jvms;
898 }
899
900 //-----------------------------make_node_notes---------------------------------
901 Node_Notes* Parse::make_node_notes(Node_Notes* caller_nn) {
902 if (caller_nn == NULL) return NULL;
915 GraphKit kit(jvms);
916 Node* ret = new ReturnNode(TypeFunc::Parms,
917 kit.control(),
918 kit.i_o(),
919 kit.reset_memory(),
920 kit.frameptr(),
921 kit.returnadr());
922 // Add zero or one return value
923 int ret_size = tf()->range_sig()->cnt() - TypeFunc::Parms;
924 if (ret_size > 0) {
925 kit.inc_sp(-ret_size); // pop the return value(s)
926 kit.sync_jvms();
927 Node* res = kit.argument(0);
928 if (tf()->returns_value_type_as_fields()) {
929 // Multiple return values (value type fields): add as many edges
930 // to the Return node as there are returned values.
931 assert(res->is_ValueType(), "what else supports multi value return");
932 ValueTypeNode* vt = res->as_ValueType();
933 ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms);
934 vt->pass_klass(ret, TypeFunc::Parms, kit);
935 vt->pass_fields(ret, TypeFunc::Parms+1, kit, /* assert_allocated */ true);
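// Judging from the helper names: the value klass is passed at TypeFunc::Parms
// and the field values follow, filling out the range_cc() signature.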
936 } else {
937 ret->add_req(res);
938 // Note: The second dummy edge is not needed by a ReturnNode.
939 }
940 }
941 // bind it to root
942 root()->add_req(ret);
943 record_for_igvn(ret);
944 initial_gvn()->transform_no_reclaim(ret);
945 }
946
947 //------------------------rethrow_exceptions-----------------------------------
948 // Bind all exception states in the list into a single RethrowNode.
949 void Compile::rethrow_exceptions(JVMState* jvms) {
950 GraphKit kit(jvms);
951 if (!kit.has_exceptions()) return; // nothing to generate
952 // Load my combined exception state into the kit, with all phis transformed:
953 SafePointNode* ex_map = kit.combine_and_pop_all_exception_states();
954 Node* ex_oop = kit.use_exception_state(ex_map);
955 RethrowNode* exit = new RethrowNode(kit.control(),
2256 // Set starting bci for uncommon trap.
2257 set_parse_bci(0);
2258
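// Code aging: load the nmethod age counter, decrement and store it back, and
// take an uncommon trap (Reason_tenured, make_not_entrant) once the
// decremented value is no longer positive.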
2259 const TypePtr* adr_type = TypeRawPtr::make((address)mc);
2260 Node* mc_adr = makecon(adr_type);
2261 Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset()));
2262 Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
2263 Node* decr = _gvn.transform(new SubINode(cnt, makecon(TypeInt::ONE)));
2264 store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered);
2265 Node *chk = _gvn.transform(new CmpINode(decr, makecon(TypeInt::ZERO)));
2266 Node* tst = _gvn.transform(new BoolNode(chk, BoolTest::gt));
2267 { BuildCutout unless(this, tst, PROB_ALWAYS);
2268 uncommon_trap(Deoptimization::Reason_tenured,
2269 Deoptimization::Action_make_not_entrant);
2270 }
2271 }
2272
2273 //------------------------------return_current---------------------------------
2274 // Append current _map to _exit_return
2275 void Parse::return_current(Node* value) {
2276 if (value != NULL && value->is_ValueType() && !_caller->has_method()) {
2277 // Returning a value type from the root JVMState
2278 if (tf()->returns_value_type_as_fields()) {
2279 // Value type is returned as fields, make sure non-flattened value type fields are allocated
2280 value = value->as_ValueType()->allocate_fields(this);
2281 } else {
2282 // Value type is returned as oop, make sure it's allocated
2283 value = value->as_ValueType()->allocate(this)->get_oop();
2284 }
2285 }
2286
2287 if (RegisterFinalizersAtInit &&
2288 method()->intrinsic_id() == vmIntrinsics::_Object_init) {
2289 call_register_finalizer();
2290 }
2291
2292 // Do not set_parse_bci, so that return goo is credited to the return insn.
2293 // vreturn can trigger an allocation so vreturn can throw. Setting
2294 // the bci here breaks exception handling. Commenting this out
2295 // doesn't seem to break anything.
2296 // set_bci(InvocationEntryBci);
2297 if (method()->is_synchronized() && GenerateSynchronizationCode) {
2298 shared_unlock(_synch_lock->box_node(), _synch_lock->obj_node());
2299 }
2300 if (C->env()->dtrace_method_probes()) {
2301 make_dtrace_method_exit(method());
2302 }
2303 SafePointNode* exit_return = _exits.map();
2304 exit_return->in( TypeFunc::Control )->add_req( control() );
|