109 BasicType bt = type->basic_type();
110 if (type == TypePtr::NULL_PTR) {
111   // Ptr types are mixed together with T_ADDRESS, but NULL is
112   // really for T_OBJECT types, so correct it.
113 bt = T_OBJECT;
114 }
115 Node *mem = memory(Compile::AliasIdxRaw);
116 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
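    // Arguments/locals are laid out at decreasing addresses below local_addrs
    // (they are stored in reverse order, see the long/double case below), so
    // slot 'index' is read from offset -index*wordSize.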
117 Node *ctl = control();
118
119 // Very similar to LoadNode::make, except that we handle unaligned longs
120 // and doubles on SPARC; Intel can handle them directly.
121 Node *l = NULL;
122 switch (bt) { // Signature is flattened
123 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
124 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
125 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
126 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
127 case T_VALUETYPE: {
128 // Load oop and create a new ValueTypeNode
129 const TypeValueTypePtr* vtptr_type = TypeValueTypePtr::make(TypePtr::NotNull, type->is_valuetype()->value_klass());
130 l = _gvn.transform(new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, vtptr_type, MemNode::unordered));
131     // Value type oop may point to the TLVB (thread-local value buffer)
132 l = ValueTypeNode::make_from_oop(this, l, vtptr_type->value_klass(), /* null_check */ false, /* buffer_check */ true);
133 break;
134 }
135 case T_VALUETYPEPTR: {
136 l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeValueTypePtr::NOTNULL, MemNode::unordered);
137 break;
138 }
139 case T_LONG:
140 case T_DOUBLE: {
141 // Since arguments are in reverse order, the argument address 'adr'
142 // refers to the back half of the long/double. Recompute adr.
143 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
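      // E.g., for index == 3 the two words sit at offsets -4*wordSize and
      // -3*wordSize; the load must start at the lower address, -(index+1)*wordSize.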
144 if (Matcher::misaligned_doubles_ok) {
145 l = (bt == T_DOUBLE)
146 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
147 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
148 } else {
149 l = (bt == T_DOUBLE)
150 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
151 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
152 }
153 break;
154 }
155 default: ShouldNotReachHere();
156 }
593 decrement_age();
594 }
595 }
596
597 if (depth() == 1 && !failing()) {
598   // Add a check to deoptimize the nmethod if the RTM (Restricted Transactional Memory) state was changed
599 rtm_deopt();
600 }
601
602 // Check for bailouts during method entry or RTM state check setup.
603 if (failing()) {
604 if (log) log->done("parse");
605 C->set_default_node_notes(caller_nn);
606 return;
607 }
608
609 // Handle value type arguments
610 int arg_size_sig = tf()->domain_sig()->cnt();
611 for (uint i = 0; i < (uint)arg_size_sig; i++) {
612 Node* parm = map()->in(i);
613 const TypeValueTypePtr* vtptr = _gvn.type(parm)->isa_valuetypeptr();
614 if (vtptr != NULL) {
615 // Create ValueTypeNode from the oop and replace the parameter
616 Node* null_ctl = top();
617 Node* not_null_obj = null_check_common(parm, T_VALUETYPE, false, &null_ctl, false);
618 if (null_ctl != top()) {
619       // TODO For now, we just deoptimize if the value type is NULL
620 PreserveJVMState pjvms(this);
621 set_control(null_ctl);
622 replace_in_map(parm, null());
623 uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
624 }
625     // Value type oop may point to the TLVB (thread-local value buffer)
626 Node* vt = ValueTypeNode::make_from_oop(this, not_null_obj, vtptr->value_klass(), /* null_check */ false, /* buffer_check */ true);
627 map()->replace_edge(parm, vt);
628 }
629 }
630
631 entry_map = map(); // capture any changes performed by method setup code
632 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
633
634 // We begin parsing as if we have just encountered a jump to the
635 // method entry.
636 Block* entry_block = start_block();
637 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
638 set_map_clone(entry_map);
639 merge_common(entry_block, entry_block->next_path_num());
640
641 #ifndef PRODUCT
642 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
643 set_parse_histogram( parse_histogram_obj );
644 #endif
645
646 // Parse all the basic blocks.
810 // Add a return value to the exit state. (Do not push it yet.)
811 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
812 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
813 if (ret_type->isa_int()) {
814 BasicType ret_bt = method()->return_type()->basic_type();
815 if (ret_bt == T_BOOLEAN ||
816 ret_bt == T_CHAR ||
817 ret_bt == T_BYTE ||
818 ret_bt == T_SHORT) {
819 ret_type = TypeInt::INT;
820 }
821 }
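    // The exit phi is widened to INT here; the precise sub-int value is
    // re-established via mask_int_value() before the return value is pushed
    // (see below).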
822
823 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
824 // becomes loaded during the subsequent parsing, the loaded and unloaded
825 // types will not join when we transform and push in do_exits().
826 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
827 if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
828 ret_type = TypeOopPtr::BOTTOM;
829 }
830 if ((_caller->has_method() || tf()->returns_value_type_as_fields()) &&
831 ret_type->isa_valuetypeptr() && !ret_type->is_valuetypeptr()->is__Value()) {
832       // When inlining, or when returning multiple values, return the
833       // value type as a ValueTypeNode rather than as an oop.
834 ret_type = TypeValueType::make(ret_type->is_valuetypeptr()->value_klass());
835 }
836 int ret_size = type2size[ret_type->basic_type()];
837 Node* ret_phi = new PhiNode(region, ret_type);
838 gvn().set_type_bottom(ret_phi);
839 _exits.ensure_stack(ret_size);
840 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
841 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
842 _exits.set_argument(0, ret_phi); // here is where the parser finds it
843 // Note: ret_phi is not yet pushed, until do_exits.
844 }
845 }
846
847 //----------------------------build_start_state-------------------------------
848 // Construct a state which contains only the incoming arguments from an
849 // unknown caller. The method & bci will be NULL & InvocationEntryBci.
850 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
851 int arg_size_sig = tf->domain_sig()->cnt();
852 int max_size = MAX2(arg_size_sig, (int)tf->range_cc()->cnt());
853 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
854 SafePointNode* map = new SafePointNode(max_size, NULL);
863 entry_nn->set_jvms(entry_jvms);
864 set_default_node_notes(entry_nn);
865 }
866 PhaseGVN& gvn = *initial_gvn();
867 uint j = 0;
868 for (uint i = 0; i < (uint)arg_size_sig; i++) {
869     assert(j >= i, "fewer actual arguments than in the signature?");
870 if (ValueTypePassFieldsAsArgs) {
871 if (i < TypeFunc::Parms) {
872 assert(i == j, "no change before the actual arguments");
873 Node* parm = gvn.transform(new ParmNode(start, i));
874 map->init_req(i, parm);
875 // Record all these guys for later GVN.
876 record_for_igvn(parm);
877 j++;
878 } else {
879 // Value type arguments are not passed by reference: we get an
880 // argument per field of the value type. Build ValueTypeNodes
881 // from the value type arguments.
882 const Type* t = tf->domain_sig()->field_at(i);
883 if (t->isa_valuetypeptr() && !t->is_valuetypeptr()->is__Value()) {
884 ciValueKlass* vk = t->is_valuetypeptr()->value_klass();
885 Node* ctl = map->control();
886 ValueTypeNode* vt = ValueTypeNode::make_from_multi(gvn, ctl, map->memory(), start, vk, j, true);
887 map->set_control(ctl);
888 map->init_req(i, vt);
889 j += vk->value_arg_slots();
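          // 'i' advances one signature entry per iteration while 'j' advances
          // by the number of argument slots occupied by the value type's
          // fields, so j can run ahead of i (cf. the assert above).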
890 } else {
891 Node* parm = gvn.transform(new ParmNode(start, j));
892 map->init_req(i, parm);
893 // Record all these guys for later GVN.
894 record_for_igvn(parm);
895 j++;
896 }
897 }
898 } else {
899 Node* parm = gvn.transform(new ParmNode(start, i));
900 map->init_req(i, parm);
901 // Record all these guys for later GVN.
902 record_for_igvn(parm);
903 j++;
904 }
1116 // In case of concurrent class loading, the type we set for the
1117 // ret_phi in build_exits() may have been too optimistic and the
1118 // ret_phi may be top now.
1119 // Otherwise, we've encountered an error and have to mark the method as
1120 // not compilable. Just using an assertion instead would be dangerous
1121 // as this could lead to an infinite compile loop in non-debug builds.
1122 {
1123 MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
1124 if (C->env()->system_dictionary_modification_counter_changed()) {
1125 C->record_failure(C2Compiler::retry_class_loading_during_parsing());
1126 } else {
1127 C->record_method_not_compilable("Can't determine return type.");
1128 }
1129 }
1130 return;
1131 }
1132 if (ret_type->isa_int()) {
1133 BasicType ret_bt = method()->return_type()->basic_type();
1134 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1135 }
1136 if (_caller->has_method() && ret_type->isa_valuetypeptr()) {
1137 // Inlined methods return a ValueTypeNode
1138 _exits.push_node(T_VALUETYPE, ret_phi);
1139 } else {
1140 _exits.push_node(ret_type->basic_type(), ret_phi);
1141 }
1142 }
1143
1144 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1145
1146 // Unlock along the exceptional paths.
1147 // This is done late so that we can common up equivalent exceptions
1148 // (e.g., null checks) arising from multiple points within this method.
1149 // See GraphKit::add_exception_state, which performs the commoning.
1150 bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode;
1151
1152   // Record the exit from a method if compiled while DTrace is turned on.
1153 if (do_synch || C->env()->dtrace_method_probes() || _replaced_nodes_for_exceptions) {
1154 // First move the exception list out of _exits:
1155 GraphKit kit(_exits.transfer_exceptions_into_jvms());
1156 SafePointNode* normal_map = kit.map(); // keep this guy safe
|
109 BasicType bt = type->basic_type();
110 if (type == TypePtr::NULL_PTR) {
111   // Ptr types are mixed together with T_ADDRESS, but NULL is
112   // really for T_OBJECT types, so correct it.
113 bt = T_OBJECT;
114 }
115 Node *mem = memory(Compile::AliasIdxRaw);
116 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize );
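    // Arguments/locals are laid out at decreasing addresses below local_addrs
    // (they are stored in reverse order, see the long/double case below), so
    // slot 'index' is read from offset -index*wordSize.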
117 Node *ctl = control();
118
119 // Very similar to LoadNode::make, except that we handle unaligned longs
120 // and doubles on SPARC; Intel can handle them directly.
121 Node *l = NULL;
122 switch (bt) { // Signature is flattened
123 case T_INT: l = new LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT, MemNode::unordered); break;
124 case T_FLOAT: l = new LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT, MemNode::unordered); break;
125 case T_ADDRESS: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered); break;
126 case T_OBJECT: l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
127 case T_VALUETYPE: {
128 // Load oop and create a new ValueTypeNode
129 const TypeInstPtr* ptr_type = TypeInstPtr::make(TypePtr::NotNull, type->is_valuetype()->value_klass());
130 l = _gvn.transform(new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, ptr_type, MemNode::unordered));
131     // Value type oop may point to the TLVB (thread-local value buffer)
132 l = ValueTypeNode::make_from_oop(this, l, type->is_valuetype()->value_klass(), /* null_check */ false, /* buffer_check */ true);
133 break;
134 }
135 case T_VALUETYPEPTR: {
136 l = new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::NOTNULL, MemNode::unordered);
137 break;
138 }
139 case T_LONG:
140 case T_DOUBLE: {
141 // Since arguments are in reverse order, the argument address 'adr'
142 // refers to the back half of the long/double. Recompute adr.
143 adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
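      // E.g., for index == 3 the two words sit at offsets -4*wordSize and
      // -3*wordSize; the load must start at the lower address, -(index+1)*wordSize.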
144 if (Matcher::misaligned_doubles_ok) {
145 l = (bt == T_DOUBLE)
146 ? (Node*)new LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
147 : (Node*)new LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
148 } else {
149 l = (bt == T_DOUBLE)
150 ? (Node*)new LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
151 : (Node*)new LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
152 }
153 break;
154 }
155 default: ShouldNotReachHere();
156 }
593 decrement_age();
594 }
595 }
596
597 if (depth() == 1 && !failing()) {
598   // Add a check to deoptimize the nmethod if the RTM (Restricted Transactional Memory) state was changed
599 rtm_deopt();
600 }
601
602 // Check for bailouts during method entry or RTM state check setup.
603 if (failing()) {
604 if (log) log->done("parse");
605 C->set_default_node_notes(caller_nn);
606 return;
607 }
608
609 // Handle value type arguments
610 int arg_size_sig = tf()->domain_sig()->cnt();
611 for (uint i = 0; i < (uint)arg_size_sig; i++) {
612 Node* parm = map()->in(i);
613 const Type* t = _gvn.type(parm);
614 if (t->is_valuetypeptr()) {
615 // Create ValueTypeNode from the oop and replace the parameter
616 Node* null_ctl = top();
617 Node* not_null_obj = null_check_common(parm, T_VALUETYPE, false, &null_ctl, false);
618 if (null_ctl != top()) {
619       // TODO For now, we just deoptimize if the value type is NULL
620 PreserveJVMState pjvms(this);
621 set_control(null_ctl);
622 replace_in_map(parm, null());
623 uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
624 }
625     // Value type oop may point to the TLVB (thread-local value buffer)
626 Node* vt = ValueTypeNode::make_from_oop(this, not_null_obj, t->value_klass(), /* null_check */ false, /* buffer_check */ true);
627 map()->replace_edge(parm, vt);
628 }
629 }
630
631 entry_map = map(); // capture any changes performed by method setup code
632 assert(jvms()->endoff() == map()->req(), "map matches JVMS layout");
633
634 // We begin parsing as if we have just encountered a jump to the
635 // method entry.
636 Block* entry_block = start_block();
637 assert(entry_block->start() == (is_osr_parse() ? osr_bci() : 0), "");
638 set_map_clone(entry_map);
639 merge_common(entry_block, entry_block->next_path_num());
640
641 #ifndef PRODUCT
642 BytecodeParseHistogram *parse_histogram_obj = new (C->env()->arena()) BytecodeParseHistogram(this, C);
643 set_parse_histogram( parse_histogram_obj );
644 #endif
645
646 // Parse all the basic blocks.
810 // Add a return value to the exit state. (Do not push it yet.)
811 if (tf()->range_sig()->cnt() > TypeFunc::Parms) {
812 const Type* ret_type = tf()->range_sig()->field_at(TypeFunc::Parms);
813 if (ret_type->isa_int()) {
814 BasicType ret_bt = method()->return_type()->basic_type();
815 if (ret_bt == T_BOOLEAN ||
816 ret_bt == T_CHAR ||
817 ret_bt == T_BYTE ||
818 ret_bt == T_SHORT) {
819 ret_type = TypeInt::INT;
820 }
821 }
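    // The exit phi is widened to INT here; the precise sub-int value is
    // re-established via mask_int_value() before the return value is pushed
    // (see below).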
822
823 // Don't "bind" an unloaded return klass to the ret_phi. If the klass
824 // becomes loaded during the subsequent parsing, the loaded and unloaded
825 // types will not join when we transform and push in do_exits().
826 const TypeOopPtr* ret_oop_type = ret_type->isa_oopptr();
827 if (ret_oop_type && !ret_oop_type->klass()->is_loaded()) {
828 ret_type = TypeOopPtr::BOTTOM;
829 }
830 if ((_caller->has_method() || tf()->returns_value_type_as_fields()) && ret_type->is_valuetypeptr()) {
831       // When inlining, or when returning multiple values, return the
832       // value type as a ValueTypeNode rather than as an oop.
833 ret_type = TypeValueType::make(ret_type->value_klass());
834 }
835 int ret_size = type2size[ret_type->basic_type()];
836 Node* ret_phi = new PhiNode(region, ret_type);
837 gvn().set_type_bottom(ret_phi);
838 _exits.ensure_stack(ret_size);
839 assert((int)(tf()->range_sig()->cnt() - TypeFunc::Parms) == ret_size, "good tf range");
840 assert(method()->return_type()->size() == ret_size, "tf agrees w/ method");
841 _exits.set_argument(0, ret_phi); // here is where the parser finds it
842 // Note: ret_phi is not yet pushed, until do_exits.
843 }
844 }
845
846 //----------------------------build_start_state-------------------------------
847 // Construct a state which contains only the incoming arguments from an
848 // unknown caller. The method & bci will be NULL & InvocationEntryBci.
849 JVMState* Compile::build_start_state(StartNode* start, const TypeFunc* tf) {
850 int arg_size_sig = tf->domain_sig()->cnt();
851 int max_size = MAX2(arg_size_sig, (int)tf->range_cc()->cnt());
852 JVMState* jvms = new (this) JVMState(max_size - TypeFunc::Parms);
853 SafePointNode* map = new SafePointNode(max_size, NULL);
862 entry_nn->set_jvms(entry_jvms);
863 set_default_node_notes(entry_nn);
864 }
865 PhaseGVN& gvn = *initial_gvn();
866 uint j = 0;
867 for (uint i = 0; i < (uint)arg_size_sig; i++) {
868     assert(j >= i, "fewer actual arguments than in the signature?");
869 if (ValueTypePassFieldsAsArgs) {
870 if (i < TypeFunc::Parms) {
871 assert(i == j, "no change before the actual arguments");
872 Node* parm = gvn.transform(new ParmNode(start, i));
873 map->init_req(i, parm);
874 // Record all these guys for later GVN.
875 record_for_igvn(parm);
876 j++;
877 } else {
878 // Value type arguments are not passed by reference: we get an
879 // argument per field of the value type. Build ValueTypeNodes
880 // from the value type arguments.
881 const Type* t = tf->domain_sig()->field_at(i);
882 if (t->is_valuetypeptr()) {
883 ciValueKlass* vk = t->value_klass();
884 Node* ctl = map->control();
885 ValueTypeNode* vt = ValueTypeNode::make_from_multi(gvn, ctl, map->memory(), start, vk, j, true);
886 map->set_control(ctl);
887 map->init_req(i, vt);
888 j += vk->value_arg_slots();
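          // 'i' advances one signature entry per iteration while 'j' advances
          // by the number of argument slots occupied by the value type's
          // fields, so j can run ahead of i (cf. the assert above).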
889 } else {
890 Node* parm = gvn.transform(new ParmNode(start, j));
891 map->init_req(i, parm);
892 // Record all these guys for later GVN.
893 record_for_igvn(parm);
894 j++;
895 }
896 }
897 } else {
898 Node* parm = gvn.transform(new ParmNode(start, i));
899 map->init_req(i, parm);
900 // Record all these guys for later GVN.
901 record_for_igvn(parm);
902 j++;
903 }
1115 // In case of concurrent class loading, the type we set for the
1116 // ret_phi in build_exits() may have been too optimistic and the
1117 // ret_phi may be top now.
1118 // Otherwise, we've encountered an error and have to mark the method as
1119 // not compilable. Just using an assertion instead would be dangerous
1120 // as this could lead to an infinite compile loop in non-debug builds.
1121 {
1122 MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
1123 if (C->env()->system_dictionary_modification_counter_changed()) {
1124 C->record_failure(C2Compiler::retry_class_loading_during_parsing());
1125 } else {
1126 C->record_method_not_compilable("Can't determine return type.");
1127 }
1128 }
1129 return;
1130 }
1131 if (ret_type->isa_int()) {
1132 BasicType ret_bt = method()->return_type()->basic_type();
1133 ret_phi = mask_int_value(ret_phi, ret_bt, &_gvn);
1134 }
1135 if (_caller->has_method() && ret_type->is_valuetypeptr()) {
1136 // Inlined methods return a ValueTypeNode
1137 _exits.push_node(T_VALUETYPE, ret_phi);
1138 } else {
1139 _exits.push_node(ret_type->basic_type(), ret_phi);
1140 }
1141 }
1142
1143 // Note: Logic for creating and optimizing the ReturnNode is in Compile.
1144
1145 // Unlock along the exceptional paths.
1146 // This is done late so that we can common up equivalent exceptions
1147 // (e.g., null checks) arising from multiple points within this method.
1148 // See GraphKit::add_exception_state, which performs the commoning.
1149 bool do_synch = method()->is_synchronized() && GenerateSynchronizationCode;
1150
1151   // Record the exit from a method if compiled while DTrace is turned on.
1152 if (do_synch || C->env()->dtrace_method_probes() || _replaced_nodes_for_exceptions) {
1153 // First move the exception list out of _exits:
1154 GraphKit kit(_exits.transfer_exceptions_into_jvms());
1155 SafePointNode* normal_map = kit.map(); // keep this guy safe
|