src/share/vm/opto/callGenerator.cpp
Index
Unified diffs
Context diffs
Sdiffs
Wdiffs
Patch
New
Old
Previous File
Next File
6934604 Cdiff src/share/vm/opto/callGenerator.cpp
src/share/vm/opto/callGenerator.cpp
Print this page
*** 132,142 ****
if (kit.C->log() != NULL) {
kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
}
! CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(tf(), target, method(), kit.bci());
_call_node = call; // Save the call node in case we need it later
if (!is_static) {
// Make an explicit receiver null_check as part of this call.
// Since we share a map with the caller, his JVMS gets adjusted.
kit.null_check_receiver_before_call(method());
--- 132,142 ----
if (kit.C->log() != NULL) {
kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
}
! CallStaticJavaNode *call = new (kit.C) CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
_call_node = call; // Save the call node in case we need it later
if (!is_static) {
// Make an explicit receiver null_check as part of this call.
// Since we share a map with the caller, his JVMS gets adjusted.
kit.null_check_receiver_before_call(method());
*** 302,334 ****
};
void LateInlineCallGenerator::do_late_inline() {
// Can't inline it
! if (call_node() == NULL || call_node()->outcnt() == 0 ||
! call_node()->in(0) == NULL || call_node()->in(0)->is_top()) {
return;
}
! const TypeTuple *r = call_node()->tf()->domain();
for (int i1 = 0; i1 < method()->arg_size(); i1++) {
! if (call_node()->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
}
! if (call_node()->in(TypeFunc::Memory)->is_top()) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
! CallStaticJavaNode* call = call_node();
// Make a clone of the JVMState that is appropriate to use for driving a parse
! Compile* C = Compile::current();
! JVMState* jvms = call->jvms()->clone_shallow(C);
uint size = call->req();
SafePointNode* map = new (C) SafePointNode(size, jvms);
for (uint i1 = 0; i1 < size; i1++) {
map->init_req(i1, call->in(i1));
}
--- 302,339 ----
};
void LateInlineCallGenerator::do_late_inline() {
// Can't inline it
! CallStaticJavaNode* call = call_node();
! if (call == NULL || call->outcnt() == 0 ||
! call->in(0) == NULL || call->in(0)->is_top()) {
return;
}
! const TypeTuple *r = call->tf()->domain();
for (int i1 = 0; i1 < method()->arg_size(); i1++) {
! if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
}
! if (call->in(TypeFunc::Memory)->is_top()) {
assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
return;
}
! Compile* C = Compile::current();
! // Remove inlined methods from Compiler's lists.
! if (call->is_macro()) {
! C->remove_macro_node(call);
! }
// Make a clone of the JVMState that is appropriate to use for driving a parse
! JVMState* old_jvms = call->jvms();
! JVMState* jvms = old_jvms->clone_shallow(C);
uint size = call->req();
SafePointNode* map = new (C) SafePointNode(size, jvms);
for (uint i1 = 0; i1 < size; i1++) {
map->init_req(i1, call->in(i1));
}
*** 338,357 ****
Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
C->initial_gvn()->set_type_bottom(mem);
map->set_req(TypeFunc::Memory, mem);
}
! // Make enough space for the expression stack and transfer the incoming arguments
! int nargs = method()->arg_size();
jvms->set_map(map);
map->ensure_stack(jvms, jvms->method()->max_stack());
! if (nargs > 0) {
! for (int i1 = 0; i1 < nargs; i1++) {
! map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1));
! }
}
if (!do_late_inline_check(jvms)) {
map->disconnect_inputs(NULL, C);
return;
}
--- 343,369 ----
Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
C->initial_gvn()->set_type_bottom(mem);
map->set_req(TypeFunc::Memory, mem);
}
! uint nargs = method()->arg_size();
! // blow away old call arguments
! Node* top = C->top();
! for (uint i1 = 0; i1 < nargs; i1++) {
! map->set_req(TypeFunc::Parms + i1, top);
! }
jvms->set_map(map);
+
+ // Make enough space in the expression stack to transfer
+ // the incoming arguments and return value.
map->ensure_stack(jvms, jvms->method()->max_stack());
! for (uint i1 = 0; i1 < nargs; i1++) {
! map->set_req(jvms->argoff() + i1, call->in(TypeFunc::Parms + i1));
}
+ // This check is done here because for_method_handle_inline() method
+ // needs jvms for inlined state.
if (!do_late_inline_check(jvms)) {
map->disconnect_inputs(NULL, C);
return;
}
*** 478,487 ****
--- 490,519 ----
CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
return new LateInlineStringCallGenerator(method, inline_cg);
}
+ class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
+
+ public:
+ LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
+ LateInlineCallGenerator(method, inline_cg) {}
+
+ virtual JVMState* generate(JVMState* jvms) {
+ Compile *C = Compile::current();
+ C->print_inlining_skip(this);
+
+ C->add_boxing_late_inline(this);
+
+ JVMState* new_jvms = DirectCallGenerator::generate(jvms);
+ return new_jvms;
+ }
+ };
+
+ CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
+ return new LateInlineBoxingCallGenerator(method, inline_cg);
+ }
//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
WarmCallInfo* _call_info;
src/share/vm/opto/callGenerator.cpp
Index
Unified diffs
Context diffs
Sdiffs
Wdiffs
Patch
New
Old
Previous File
Next File