/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciMethodHandle.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "opto/addnode.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

// Utility function.
const TypeFunc* CallGenerator::tf() const {
  return TypeFunc::make(method());
}

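// Returns true if this call site was originally a MethodHandle linker or
// invoker intrinsic (e.g. linkTo* or invokeBasic) that has been resolved
// through to a concrete callee: the symbolic info at the bci is an MH
// intrinsic, but the selected callee is not.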
bool CallGenerator::is_inlined_mh_linker(JVMState* jvms, ciMethod* callee) {
  ciMethod* symbolic_info = jvms->method()->get_method_at_bci(jvms->bci());
  return symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic();
}

//-----------------------------ParseGenerator---------------------------------
// Internal class which handles all direct bytecode traversal.
class ParseGenerator : public InlineCallGenerator {
private:
  bool  _is_osr;
  float _expected_uses;

public:
  ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
    : InlineCallGenerator(method)
  {
    _is_osr        = is_osr;
    _expected_uses = expected_uses;
    assert(InlineTree::check_can_parse(method) == NULL, "parse must be possible");
  }

  virtual bool      is_parse() const           { return true; }
  virtual JVMState* generate(JVMState* jvms);
  bool is_osr() const { return _is_osr; }

};

JVMState* ParseGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (is_osr()) {
    // The JVMS for an OSR has a single argument (see its TypeFunc).
    assert(jvms->depth() == 1, "no inline OSR");
  }

  if (C->failing()) {
    return NULL;  // bailing out of the compile; do not try to parse
  }

  Parse parser(jvms, method(), _expected_uses);
  // Grab signature for matching/allocation
#ifdef ASSERT
  if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) {
    MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag);
    assert(C->env()->system_dictionary_modification_counter_changed(),
           "Must invalidate if TypeFuncs differ");
  }
#endif

  GraphKit& exits = parser.exits();

  if (C->failing()) {
    while (exits.pop_exception_state() != NULL) ;
    return NULL;
  }

  assert(exits.jvms()->same_calls_as(jvms), "sanity");

  // Simply return the exit state of the parser,
  // augmented by any exceptional states.
  return exits.transfer_exceptions_into_jvms();
}

//---------------------------DirectCallGenerator------------------------------
// Internal class which handles all out-of-line calls w/o receiver type checks.
class DirectCallGenerator : public CallGenerator {
private:
  CallStaticJavaNode* _call_node;
  // Force separate memory and I/O projections for the exceptional
  // paths to facilitate late inlining.
  bool                _separate_io_proj;

public:
  DirectCallGenerator(ciMethod* method, bool separate_io_proj)
    : CallGenerator(method),
      _call_node(NULL),
      _separate_io_proj(separate_io_proj)
  {
  }
  virtual JVMState* generate(JVMState* jvms);

  CallStaticJavaNode* call_node() const { return _call_node; }
};

JVMState* DirectCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  bool is_static = method()->is_static();
  address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
                             : SharedRuntime::get_resolve_opt_virtual_call_stub();

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
  }

  CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method(), kit.bci());
  if (is_inlined_mh_linker(jvms, method())) {
    // To be able to issue a direct call and skip a call to the MH.linkTo*/invokeBasic
    // adapter, additional information about the method being invoked should be
    // attached to the call site to make the resolution logic work
    // (see SharedRuntime::resolve_static_call_C).
    call->set_override_symbolic_info(true);
  }
  _call_node = call;  // Save the call node in case we need it later
  if (!is_static) {
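    // A value type receiver is never null, so store it to memory
    // instead of emitting a null check.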
    if (kit.argument(0)->is_ValueType()) {
      ValueTypeNode* vt = kit.argument(0)->as_ValueType();
      vt->store_to_memory(&kit);
    } else {
      // Make an explicit receiver null_check as part of this call.
      // Since we share a map with the caller, his JVMS gets adjusted.
      kit.null_check_receiver_before_call(method());
    }
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
    // Mark the call node as virtual, sort of:
    call->set_optimized_virtual(true);
    if (method()->is_method_handle_intrinsic() ||
        method()->is_compiled_lambda_form()) {
      call->set_method_handle_invoke(true);
    }
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call, false, _separate_io_proj);
  Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
  // Check if the return value is a value type pointer
  if (gvn.type(ret)->isa_valuetypeptr()) {
    // Create a ValueTypeNode from the oop and replace the return value
    Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
    kit.push_node(T_VALUETYPE, vt);
  } else {
    kit.push_node(method()->return_type()->basic_type(), ret);
  }
  return kit.transfer_exceptions_into_jvms();
}

//--------------------------VirtualCallGenerator------------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
  int _vtable_index;
public:
  VirtualCallGenerator(ciMethod* method, int vtable_index)
    : CallGenerator(method), _vtable_index(vtable_index)
  {
    assert(vtable_index == Method::invalid_vtable_index ||
           vtable_index >= 0, "either invalid or usable");
  }
  virtual bool      is_virtual() const          { return true; }
  virtual JVMState* generate(JVMState* jvms);
};

JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  Node* receiver = kit.argument(0);
  PhaseGVN& gvn = kit.gvn();
  kit.C->print_inlining_update(this);

  if (kit.C->log() != NULL) {
    kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
  }

  // If the receiver is a constant null, do not torture the system
  // by attempting to call through it.  The compile will proceed
  // correctly, but may bail out in final_graph_reshaping, because
  // the call instruction will have a seemingly deficient out-count.
  // (The bailout says something misleading about an "infinite loop".)
  if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
    kit.inc_sp(arg_size);  // restore arguments
    kit.uncommon_trap(Deoptimization::Reason_null_check,
                      Deoptimization::Action_none,
                      NULL, "null receiver");
    return kit.transfer_exceptions_into_jvms();
  }

  // Ideally we would unconditionally do a null check here and let it
  // be converted to an implicit check based on profile information.
  // However currently the conversion to implicit null checks in
  // Block::implicit_null_check() only looks for loads and stores, not calls.
  ciMethod* caller = kit.method();
  ciMethodData* caller_md = (caller == NULL) ? NULL : caller->method_data();
  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
      ((ImplicitNullCheckThreshold > 0) && caller_md &&
       (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
    // Make an explicit receiver null_check as part of this call.
    // Since we share a map with the caller, his JVMS gets adjusted.
    receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      // And dump it back to the caller, decorated with any exceptions:
      return kit.transfer_exceptions_into_jvms();
    }
  }

  assert(!method()->is_static(), "virtual call must not be to static");
  assert(!method()->is_final(), "virtual call should not be to final");
  assert(!method()->is_private(), "virtual call should not be to private");
  assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
         "no vtable calls if +UseInlineCaches");
  address target = SharedRuntime::get_resolve_virtual_call_stub();
  // Normal inline cache used for call
  CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci());
  if (is_inlined_mh_linker(jvms, method())) {
    // To be able to issue a direct call (optimized virtual or virtual)
    // and skip a call to the MH.linkTo*/invokeBasic adapter, additional information
    // about the method being invoked should be attached to the call site to
    // make the resolution logic work (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
    call->set_override_symbolic_info(true);
  }
  kit.set_arguments_for_java_call(call);
  kit.set_edges_for_java_call(call);
  Node* ret = kit.set_results_for_java_call(call);
  // Check if the return value is a value type pointer
  if (gvn.type(ret)->isa_valuetypeptr()) {
    // Create a ValueTypeNode from the oop and replace the return value
    Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret);
    kit.push_node(T_VALUETYPE, vt);
  } else {
    kit.push_node(method()->return_type()->basic_type(), ret);
  }

  // Represent the effect of an implicit receiver null_check
  // as part of this call.  Since we share a map with the caller,
  // his JVMS gets adjusted.
  kit.cast_not_null(receiver);
  return kit.transfer_exceptions_into_jvms();
}

CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  return new ParseGenerator(m, expected_uses);
}

// As a special case, the JVMS passed to this CallGenerator is
// for the method execution already in progress, not just the JVMS
// of the caller.  Thus, this CallGenerator cannot be mixed with others!
CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
  if (InlineTree::check_can_parse(m) != NULL)  return NULL;
  float past_uses = m->interpreter_invocation_count();
  float expected_uses = past_uses;
  return new ParseGenerator(m, expected_uses, true);
}

CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
  assert(!m->is_abstract(), "for_direct_call mismatch");
  return new DirectCallGenerator(m, separate_io_proj);
}

CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
  assert(!m->is_static(), "for_virtual_call mismatch");
  assert(!m->is_method_handle_intrinsic(), "should be a direct call");
  return new VirtualCallGenerator(m, vtable_index);
}

// Allow inlining decisions to be delayed
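// by emitting the out-of-line CallStaticJava during parsing and revisiting
// the recorded call site after the main parse, when more of the graph is known.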
class LateInlineCallGenerator : public DirectCallGenerator {
private:
  // unique id for log compilation
  jlong _unique_id;

protected:
  CallGenerator* _inline_cg;
  virtual bool do_late_inline_check(JVMState* jvms) { return true; }

public:
  LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    DirectCallGenerator(method, true), _inline_cg(inline_cg), _unique_id(0) {}

  virtual bool is_late_inline() const { return true; }

  // Convert the CallStaticJava into an inline
  virtual void do_late_inline();

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    // Record that this call site should be revisited once the main
    // parse is finished.
    if (!is_mh_late_inline()) {
      C->add_late_inline(this);
    }

    // Emit the CallStaticJava and request separate projections so
    // that the late inlining logic can distinguish between fall
    // through and exceptional uses of the memory and io projections
    // as is done for allocations and macro expansion.
    return DirectCallGenerator::generate(jvms);
  }

  virtual void print_inlining_late(const char* msg) {
    CallNode* call = call_node();
    Compile* C = Compile::current();
    C->print_inlining_assert_ready();
    C->print_inlining(method(), call->jvms()->depth()-1, call->jvms()->bci(), msg);
    C->print_inlining_move_to(this);
    C->print_inlining_update_delayed(this);
  }

  virtual void set_unique_id(jlong id) {
    _unique_id = id;
  }

  virtual jlong unique_id() const {
    return _unique_id;
  }
};

void LateInlineCallGenerator::do_late_inline() {
  // Can't inline it if the call is dead (no uses or a dead control input).
  CallStaticJavaNode* call = call_node();
  if (call == NULL || call->outcnt() == 0 ||
      call->in(0) == NULL || call->in(0)->is_top()) {
    return;
  }

  const TypeTuple* r = call->tf()->domain();
  for (int i1 = 0; i1 < method()->arg_size(); i1++) {
    if (call->in(TypeFunc::Parms + i1)->is_top() && r->field_at(TypeFunc::Parms + i1) != Type::HALF) {
      assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
      return;
    }
  }

  if (call->in(TypeFunc::Memory)->is_top()) {
    assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
    return;
  }

  Compile* C = Compile::current();
  // Remove inlined methods from Compiler's lists.
  if (call->is_macro()) {
    C->remove_macro_node(call);
  }

  // Make a clone of the JVMState that is appropriate for driving a parse
  JVMState* old_jvms = call->jvms();
  JVMState* jvms = old_jvms->clone_shallow(C);
  uint size = call->req();
  SafePointNode* map = new SafePointNode(size, jvms);
  for (uint i1 = 0; i1 < size; i1++) {
    map->init_req(i1, call->in(i1));
  }

  // Make sure the state is a MergeMem for parsing.
  if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
    Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
    C->initial_gvn()->set_type_bottom(mem);
    map->set_req(TypeFunc::Memory, mem);
  }

  uint nargs = method()->arg_size();
  // blow away old call arguments
  Node* top = C->top();
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_req(TypeFunc::Parms + i1, top);
  }
  jvms->set_map(map);

  // Make enough space in the expression stack to transfer
  // the incoming arguments and return value.
  map->ensure_stack(jvms, jvms->method()->max_stack());
  for (uint i1 = 0; i1 < nargs; i1++) {
    map->set_argument(jvms, i1, call->in(TypeFunc::Parms + i1));
  }

  C->print_inlining_assert_ready();

  C->print_inlining_move_to(this);

  C->log_late_inline(this);

  // This check is done here because the for_method_handle_inline() method
  // needs the JVMState of the inlined call.
  if (!do_late_inline_check(jvms)) {
    map->disconnect_inputs(NULL, C);
    return;
  }

  // Setup default node notes to be picked up by the inlining
  Node_Notes* old_nn = C->node_notes_at(call->_idx);
  if (old_nn != NULL) {
    Node_Notes* entry_nn = old_nn->clone(C);
    entry_nn->set_jvms(jvms);
    C->set_default_node_notes(entry_nn);
  }

  // Now perform the inlining using the synthesized JVMState
  JVMState* new_jvms = _inline_cg->generate(jvms);
  if (new_jvms == NULL)  return;  // no change
  if (C->failing())      return;

  // Capture any exceptional control flow
  GraphKit kit(new_jvms);

  // Find the result object
  Node* result = C->top();
  int result_size = method()->return_type()->size();
  if (result_size != 0 && !kit.stopped()) {
    result = (result_size == 1) ? kit.pop() : kit.pop_pair();
  }

  C->set_has_loops(C->has_loops() || _inline_cg->method()->has_loops());
  C->env()->notice_inlined_method(_inline_cg->method());
  C->set_inlining_progress(true);

  kit.replace_call(call, result, true);
}


CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineCallGenerator(method, inline_cg);
}

class LateInlineMHCallGenerator : public LateInlineCallGenerator {
  ciMethod* _caller;
  int _attempt;
  bool _input_not_const;

  virtual bool do_late_inline_check(JVMState* jvms);
  virtual bool already_attempted() const { return _attempt > 0; }

public:
  LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
    LateInlineCallGenerator(callee, NULL), _caller(caller), _attempt(0), _input_not_const(input_not_const) {}

  virtual bool is_mh_late_inline() const { return true; }

  virtual JVMState* generate(JVMState* jvms) {
    JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);

    Compile* C = Compile::current();
    if (_input_not_const) {
      // Inlining won't be possible, so there is no need to enqueue right now.
      call_node()->set_generator(this);
    } else {
      C->add_late_inline(this);
    }
    return new_jvms;
  }
};

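// Try to resolve the MethodHandle call target now that argument constants
// may have become known; on success, install the resulting inline
// CallGenerator so do_late_inline() can use it.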
bool LateInlineMHCallGenerator::do_late_inline_check(JVMState* jvms) {

  CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), _input_not_const);

  Compile::current()->print_inlining_update_delayed(this);

  if (!_input_not_const) {
    _attempt++;
  }

  if (cg != NULL && cg->is_inline()) {
    assert(!cg->is_late_inline(), "we're doing late inlining");
    _inline_cg = cg;
    Compile::current()->dec_number_of_mh_late_inlines();
    return true;
  }

  call_node()->set_generator(this);
  return false;
}

CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
  Compile::current()->inc_number_of_mh_late_inlines();
  CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
  return cg;
}

class LateInlineStringCallGenerator : public LateInlineCallGenerator {

public:
  LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_string_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }

  virtual bool is_string_late_inline() const { return true; }
};

CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineStringCallGenerator(method, inline_cg);
}

class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {

public:
  LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
    LateInlineCallGenerator(method, inline_cg) {}

  virtual JVMState* generate(JVMState* jvms) {
    Compile* C = Compile::current();

    C->log_inline_id(this);

    C->add_boxing_late_inline(this);

    JVMState* new_jvms = DirectCallGenerator::generate(jvms);
    return new_jvms;
  }
};

CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
  return new LateInlineBoxingCallGenerator(method, inline_cg);
}

//---------------------------WarmCallGenerator--------------------------------
// Internal class which handles initial deferral of inlining decisions.
class WarmCallGenerator : public CallGenerator {
  WarmCallInfo*   _call_info;
  CallGenerator*  _if_cold;
  CallGenerator*  _if_hot;
  bool            _is_virtual;   // caches virtuality of if_cold
  bool            _is_inline;    // caches inline-ness of if_hot

public:
  WarmCallGenerator(WarmCallInfo* ci,
                    CallGenerator* if_cold,
                    CallGenerator* if_hot)
    : CallGenerator(if_cold->method())
  {
    assert(method() == if_hot->method(), "consistent choices");
    _call_info  = ci;
    _if_cold    = if_cold;
    _if_hot     = if_hot;
    _is_virtual = if_cold->is_virtual();
    _is_inline  = if_hot->is_inline();
  }

  virtual bool      is_inline() const           { return _is_inline; }
  virtual bool      is_virtual() const          { return _is_virtual; }
  virtual bool      is_deferred() const         { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
                                            CallGenerator* if_cold,
                                            CallGenerator* if_hot) {
  return new WarmCallGenerator(ci, if_cold, if_hot);
}

JVMState* WarmCallGenerator::generate(JVMState* jvms) {
  Compile* C = Compile::current();
  C->print_inlining_update(this);

  if (C->log() != NULL) {
    C->log()->elem("warm_call bci='%d'", jvms->bci());
  }
  jvms = _if_cold->generate(jvms);
  if (jvms != NULL) {
    Node* m = jvms->map()->control();
    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
    if (m->is_Catch())     m = m->in(0);  else m = C->top();
    if (m->is_Proj())      m = m->in(0);  else m = C->top();
    if (m->is_CallJava()) {
      _call_info->set_call(m->as_Call());
      _call_info->set_hot_cg(_if_hot);
#ifndef PRODUCT
      if (PrintOpto || PrintOptoInlining) {
        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
        tty->print("WCI: ");
        _call_info->print();
      }
#endif
      _call_info->set_heat(_call_info->compute_heat());
      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
    }
  }
  return jvms;
}

void WarmCallInfo::make_hot() {
  Unimplemented();
}

void WarmCallInfo::make_cold() {
  // No action:  Just dequeue.
}


//------------------------PredictedCallGenerator------------------------------
// Internal class which handles calls with a profile-predicted receiver type:
// the hot path is taken behind a receiver type check, with a fallback path
// for receivers that miss the prediction.
class PredictedCallGenerator : public CallGenerator {
  ciKlass*       _predicted_receiver;
  CallGenerator* _if_missed;
  CallGenerator* _if_hit;
  float          _hit_prob;

public:
  PredictedCallGenerator(ciKlass* predicted_receiver,
                         CallGenerator* if_missed,
                         CallGenerator* if_hit, float hit_prob)
    : CallGenerator(if_missed->method())
  {
    // The call profile data may predict the hit_prob as extreme as 0 or 1.
    // Remove the extreme values from the range.
    if (hit_prob > PROB_MAX) hit_prob = PROB_MAX;
    if (hit_prob < PROB_MIN) hit_prob = PROB_MIN;

    _predicted_receiver = predicted_receiver;
    _if_missed          = if_missed;
    _if_hit             = if_hit;
    _hit_prob           = hit_prob;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
  virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
                                                 CallGenerator* if_missed,
                                                 CallGenerator* if_hit,
                                                 float hit_prob) {
  return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob);
}


JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  PhaseGVN& gvn = kit.gvn();
  // We need an explicit receiver null_check before checking its type.
  // We share a map with the caller, so his JVMS gets adjusted.
  Node* receiver = kit.argument(0);
  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicted_call bci='%d' klass='%d'",
              jvms->bci(), log->identify(_predicted_receiver));
  }

  receiver = kit.null_check_receiver_before_call(method());
  if (kit.stopped()) {
    return kit.transfer_exceptions_into_jvms();
  }

  // Make a copy of the replaced nodes in case we need to restore them
  ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
  replaced_nodes.clone();

  Node* exact_receiver = receiver;  // will get updated in place...
  Node* slow_ctl = kit.type_check_receiver(receiver,
                                           _predicted_receiver, _hit_prob,
                                           &exact_receiver);

  SafePointNode* slow_map = NULL;
  JVMState* slow_jvms = NULL;
  { PreserveJVMState pjvms(&kit);
    kit.set_control(slow_ctl);
    if (!kit.stopped()) {
      slow_jvms = _if_missed->generate(kit.sync_jvms());
      if (kit.failing())
        return NULL;  // might happen because of NodeCountInliningCutoff
      assert(slow_jvms != NULL, "must be");
      kit.add_exception_states_from(slow_jvms);
      kit.set_map(slow_jvms->map());
      if (!kit.stopped())
        slow_map = kit.stop();
    }
  }

  if (kit.stopped()) {
    // The instance does not exactly match the desired type.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // Fall through if the instance exactly matches the desired type.
  kit.replace_in_map(receiver, exact_receiver);

  // Make the hot call:
  JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
  if (new_jvms == NULL) {
    // Inline failed, so make a direct call.
    assert(_if_hit->is_inline(), "must have been a failed inline");
    CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
    new_jvms = cg->generate(kit.sync_jvms());
  }
  kit.add_exception_states_from(new_jvms);
  kit.set_jvms(new_jvms);

  // Need to merge slow and fast?
  if (slow_map == NULL) {
    // The fast path is the only path remaining.
    return kit.transfer_exceptions_into_jvms();
  }

  if (kit.stopped()) {
    // Inlined method threw an exception, so it's just the slow path after all.
    kit.set_jvms(slow_jvms);
    return kit.transfer_exceptions_into_jvms();
  }

  // There are 2 branches and the replaced nodes are only valid on
  // one: restore the replaced nodes to what they were before the
  // branch.
  kit.map()->set_replaced_nodes(replaced_nodes);

  // Finish the diamond.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(3);
  region->init_req(1, kit.control());
  region->init_req(2, slow_map->control());
  kit.set_control(gvn.transform(region));
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  iophi->set_req(2, slow_map->i_o());
  kit.set_i_o(gvn.transform(iophi));
  // Merge memory
  kit.merge_memory(slow_map->merged_memory(), region, 2);
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }
  uint tos = kit.jvms()->stkoff() + kit.sp();
  uint limit = slow_map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* m = kit.map()->in(i);
    Node* n = slow_map->in(i);
    if (m != n) {
      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
      Node* phi = PhiNode::make(region, m, t);
      phi->set_req(2, n);
      kit.map()->set_req(i, gvn.transform(phi));
    }
  }
  return kit.transfer_exceptions_into_jvms();
}


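// Decide how to compile a MethodHandle invoke: if the MH/MemberName argument
// is a compile-time constant the call can be inlined right away; otherwise it
// is either deferred for late inlining or compiled as an out-of-line call.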
CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden) {
  assert(callee->is_method_handle_intrinsic() ||
         callee->is_compiled_lambda_form(), "for_method_handle_call mismatch");
  bool input_not_const;
  CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, input_not_const);
  Compile* C = Compile::current();
  if (cg != NULL) {
    if (!delayed_forbidden && AlwaysIncrementalInline) {
      return CallGenerator::for_late_inline(callee, cg);
    } else {
      return cg;
    }
  }
  int bci = jvms->bci();
  ciCallProfile profile = caller->call_profile_at_bci(bci);
  int call_site_count = caller->scale_count(profile.count());

  if (IncrementalInline && call_site_count > 0 &&
      (input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())) {
    return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
  } else {
    // Out-of-line call.
    return CallGenerator::for_direct_call(callee);
  }
}

CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const) {
  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();
  Compile* C = kit.C;
  vmIntrinsics::ID iid = callee->intrinsic_id();
  input_not_const = true;
  switch (iid) {
  case vmIntrinsics::_invokeBasic:
    {
      // Get MethodHandle receiver:
      Node* receiver = kit.argument(0);
      if (receiver->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = receiver->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_method_handle()->get_vmtarget();
        const int vtable_index = Method::invalid_vtable_index;
        CallGenerator* cg = C->call_generator(target, vtable_index,
                                              false /* call_does_dispatch */,
                                              jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS);
        return cg;
      } else {
        const char* msg = "receiver not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  case vmIntrinsics::_linkToVirtual:
  case vmIntrinsics::_linkToStatic:
  case vmIntrinsics::_linkToSpecial:
  case vmIntrinsics::_linkToInterface:
    {
      // Get MemberName argument:
      Node* member_name = kit.argument(callee->arg_size() - 1);
      if (member_name->Opcode() == Op_ConP) {
        input_not_const = false;
        const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
        ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();

        // In lambda forms we erase signature types to avoid resolving issues
        // involving class loaders.  When we optimize a method handle invoke
        // to a direct call we must cast the receiver and arguments to their
        // actual types.
        ciSignature* signature = target->signature();
        const int receiver_skip = target->is_static() ? 0 : 1;
        // Cast receiver to its type.
        if (!target->is_static()) {
          Node* arg = kit.argument(0);
          const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
          const Type*       sig_type = TypeOopPtr::make_from_klass(signature->accessing_klass());
          if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
            Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
            kit.set_argument(0, cast_obj);
          }
        }
        // Cast reference arguments to their types.
        for (int i = 0, j = 0; i < signature->count(); i++) {
          ciType* t = signature->type_at(i);
          if (t->is_klass()) {
            Node* arg = kit.argument(receiver_skip + j);
            const TypeOopPtr* arg_type = arg->bottom_type()->isa_oopptr();
            const Type*       sig_type = TypeOopPtr::make_from_klass(t->as_klass());
            if (arg_type != NULL && !arg_type->higher_equal(sig_type)) {
              Node* cast_obj = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type));
              kit.set_argument(receiver_skip + j, cast_obj);
            }
          }
          j += t->size();  // long and double take two slots
        }

        // Try to get the most accurate receiver type
        const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
        const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
        int  vtable_index       = Method::invalid_vtable_index;
        bool call_does_dispatch = false;

        ciKlass* speculative_receiver_type = NULL;
        if (is_virtual_or_interface) {
          ciInstanceKlass* klass = target->holder();
          Node*             receiver_node = kit.argument(0);
          const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
          // call_does_dispatch and vtable_index are out-parameters.  They might be changed.
          // optimize_virtual_call() takes 2 different holder
          // arguments for a corner case that doesn't apply here (see
          // Parse::do_call())
          target = C->optimize_virtual_call(caller, jvms->bci(), klass, klass,
                                            target, receiver_type, is_virtual,
                                            call_does_dispatch, vtable_index,  // out-parameters
                                            false /* check_access */);
          // We lack profiling at this call but type speculation may
          // provide us with a type
          speculative_receiver_type = (receiver_type != NULL) ? receiver_type->speculative_type() : NULL;
        }
        CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
                                              true /* allow_inline */,
                                              PROB_ALWAYS,
                                              speculative_receiver_type);
        return cg;
      } else {
        const char* msg = "member_name not constant";
        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), msg);
        C->log_inline_failure(msg);
      }
    }
    break;

  default:
    fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
    break;
  }
  return NULL;
}


//------------------------PredicatedIntrinsicGenerator------------------------------
// Internal class which handles all predicated Intrinsic calls.
class PredicatedIntrinsicGenerator : public CallGenerator {
  CallGenerator* _intrinsic;
  CallGenerator* _cg;

public:
  PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
                               CallGenerator* cg)
    : CallGenerator(cg->method())
  {
    _intrinsic = intrinsic;
    _cg        = cg;
  }

  virtual bool      is_virtual()   const    { return true; }
  virtual bool      is_inlined()   const    { return true; }
  virtual bool      is_intrinsic() const    { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
                                                       CallGenerator* cg) {
  return new PredicatedIntrinsicGenerator(intrinsic, cg);
}


JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
  // The code we want to generate here is:
  //    if (receiver == NULL)
  //      uncommon_trap
  //    if (predicate(0))
  //      do_intrinsic(0)
  //    else
  //      if (predicate(1))
  //        do_intrinsic(1)
  //    ...
  //    else
  //      do_java_comp

  GraphKit kit(jvms);
  PhaseGVN& gvn = kit.gvn();

  CompileLog* log = kit.C->log();
  if (log != NULL) {
    log->elem("predicated_intrinsic bci='%d' method='%d'",
              jvms->bci(), log->identify(method()));
  }

  if (!method()->is_static()) {
    // We need an explicit receiver null_check before checking its type in predicate.
    // We share a map with the caller, so his JVMS gets adjusted.
    Node* receiver = kit.null_check_receiver_before_call(method());
    if (kit.stopped()) {
      return kit.transfer_exceptions_into_jvms();
    }
  }

  int n_predicates = _intrinsic->predicates_count();
  assert(n_predicates > 0, "sanity");

  JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));

  // Region for normal compilation code if intrinsic failed.
  Node* slow_region = new RegionNode(1);

  int results = 0;
  for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
#ifdef ASSERT
    JVMState* old_jvms = kit.jvms();
    SafePointNode* old_map = kit.map();
    Node* old_io  = old_map->i_o();
    Node* old_mem = old_map->memory();
    Node* old_exc = old_map->next_exception();
#endif
    Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
#ifdef ASSERT
    // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
    assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
    SafePointNode* new_map = kit.map();
    assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
    assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
    assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
#endif
    if (!kit.stopped()) {
      PreserveJVMState pjvms(&kit);
      // Generate intrinsic code:
      JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
      if (new_jvms == NULL) {
        // Intrinsic failed, use normal compilation path for this predicate.
        slow_region->add_req(kit.control());
      } else {
        kit.add_exception_states_from(new_jvms);
        kit.set_jvms(new_jvms);
        if (!kit.stopped()) {
          result_jvms[results++] = kit.jvms();
        }
      }
    }
    if (else_ctrl == NULL) {
      else_ctrl = kit.C->top();
    }
    kit.set_control(else_ctrl);
  }
  if (!kit.stopped()) {
    // Final 'else' after predicates.
    slow_region->add_req(kit.control());
  }
  if (slow_region->req() > 1) {
    PreserveJVMState pjvms(&kit);
    // Generate normal compilation code:
    kit.set_control(gvn.transform(slow_region));
    JVMState* new_jvms = _cg->generate(kit.sync_jvms());
    if (kit.failing())
      return NULL;  // might happen because of NodeCountInliningCutoff
    assert(new_jvms != NULL, "must be");
    kit.add_exception_states_from(new_jvms);
    kit.set_jvms(new_jvms);
    if (!kit.stopped()) {
      result_jvms[results++] = kit.jvms();
    }
  }

  if (results == 0) {
    // All paths ended in uncommon traps.
    (void) kit.stop();
    return kit.transfer_exceptions_into_jvms();
  }

  if (results == 1) { // Only one path
    kit.set_jvms(result_jvms[0]);
    return kit.transfer_exceptions_into_jvms();
  }

  // Merge all paths.
  kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
  RegionNode* region = new RegionNode(results + 1);
  Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
  for (int i = 0; i < results; i++) {
    JVMState* jvms = result_jvms[i];
    int path = i + 1;
    SafePointNode* map = jvms->map();
    region->init_req(path, map->control());
    iophi->set_req(path, map->i_o());
    if (i == 0) {
      kit.set_jvms(jvms);
    } else {
      kit.merge_memory(map->merged_memory(), region, path);
    }
  }
  kit.set_control(gvn.transform(region));
  kit.set_i_o(gvn.transform(iophi));
  // Transform new memory Phis.
  for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
    Node* phi = mms.memory();
    if (phi->is_Phi() && phi->in(0) == region) {
      mms.set_memory(gvn.transform(phi));
    }
  }

  // Merge debug info.
  Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
  uint tos = kit.jvms()->stkoff() + kit.sp();
  Node* map = kit.map();
  uint limit = map->req();
  for (uint i = TypeFunc::Parms; i < limit; i++) {
    // Skip unused stack slots; fast forward to monoff();
    if (i == tos) {
      i = kit.jvms()->monoff();
      if (i >= limit)  break;
    }
    Node* n = map->in(i);
    ins[0] = n;
    const Type* t = gvn.type(n);
    bool needs_phi = false;
    for (int j = 1; j < results; j++) {
      JVMState* jvms = result_jvms[j];
      Node* jmap = jvms->map();
      Node* m = NULL;
      if (jmap->req() > i) {
        m = jmap->in(i);
        if (m != n) {
          needs_phi = true;
          t = t->meet_speculative(gvn.type(m));
        }
      }
      ins[j] = m;
    }
    if (needs_phi) {
      Node* phi = PhiNode::make(region, n, t);
      for (int j = 1; j < results; j++) {
        phi->set_req(j + 1, ins[j]);
      }
      map->set_req(i, gvn.transform(phi));
    }
  }

  return kit.transfer_exceptions_into_jvms();
}

//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which replaces the call with an uncommon trap,
// deoptimizing to the interpreter with the given reason and action.
class UncommonTrapCallGenerator : public CallGenerator {
  Deoptimization::DeoptReason _reason;
  Deoptimization::DeoptAction _action;

public:
  UncommonTrapCallGenerator(ciMethod* m,
                            Deoptimization::DeoptReason reason,
                            Deoptimization::DeoptAction action)
    : CallGenerator(m)
  {
    _reason = reason;
    _action = action;
  }

  virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
  virtual bool      is_trap() const             { return true; }

  virtual JVMState* generate(JVMState* jvms);
};


CallGenerator*
CallGenerator::for_uncommon_trap(ciMethod* m,
                                 Deoptimization::DeoptReason reason,
                                 Deoptimization::DeoptAction action) {
  return new UncommonTrapCallGenerator(m, reason, action);
}


JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
  GraphKit kit(jvms);
  kit.C->print_inlining_update(this);
  // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
  int nargs = method()->arg_size();
  kit.inc_sp(nargs);
  assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
  if (_reason == Deoptimization::Reason_class_check &&
      _action == Deoptimization::Action_maybe_recompile) {
    // Temp fix for 6529811
    // Don't allow uncommon_trap to override our decision to recompile in the event
    // of a class cast failure for a monomorphic call as it will never let us convert
    // the call to either bi-morphic or megamorphic and can lead to unc-trap loops
    bool keep_exact_action = true;
    kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action);
  } else {
    kit.uncommon_trap(_reason, _action);
  }
  return kit.transfer_exceptions_into_jvms();
}

// (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)

// (Note:  Merged hook_up_exits into ParseGenerator::generate.)

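// Rough heuristic constants for estimating ideal-graph size, used by
// WarmCallInfo::init() below to predict how many nodes inlining a method
// will add to the compilation.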
#define NODES_OVERHEAD_PER_METHOD (30.0)
#define NODES_PER_BYTECODE (9.5)

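// Fill in the warm-call metrics (expected execution count, profit, work,
// and graph size) from the call-site profile.  The heat value is computed
// later from these metrics (see compute_heat() below).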
void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
  int call_count = profile.count();
  int code_size = call_method->code_size();

  // Expected execution count is based on the historical count:
  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);

  // Expected profit from inlining, in units of simple call-overheads.
  _profit = 1.0;

  // Expected work performed by the call in units of call-overheads.
  // %%% need an empirical curve fit for "work" (time in call)
  float bytecodes_per_call = 3;
  _work = 1.0 + code_size / bytecodes_per_call;

  // Expected size of compilation graph:
  // -XX:+PrintParseStatistics once reported:
  //  Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
  //  Histogram of 144298 parsed bytecodes:
  // %%% Need a better predictor for graph size.
  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
}

// is_cold:  Return true if the node should never be inlined.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_cold() const {
  if (count()  <  WarmCallMinCount)        return true;
  if (profit() <  WarmCallMinProfit)       return true;
  if (work()   >  WarmCallMaxWork)         return true;
  if (size()   >  WarmCallMaxSize)         return true;
  return false;
}

// is_hot:  Return true if the node should be inlined immediately.
// This is true if any of the key metrics are extreme.
bool WarmCallInfo::is_hot() const {
  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
  if (count()  >= HotCallCountThreshold)   return true;
  if (profit() >= HotCallProfitThreshold)  return true;
  if (work()   <= HotCallTrivialWork)      return true;
  if (size()   <= HotCallTrivialSize)      return true;
  return false;
}

// compute_heat:  Combine the metrics into a single heat value used to
// order warm calls; larger means warmer.
float WarmCallInfo::compute_heat() const {
  assert(!is_cold(), "compute heat only on warm nodes");
  assert(!is_hot(), "compute heat only on warm nodes");
  int min_size = MAX2(0,   (int)HotCallTrivialSize);
  int max_size = MIN2(500, (int)WarmCallMaxSize);
  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
  float size_factor;
  if      (method_size < 0.05)  size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15)  size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5)   size_factor = 1;    // better than avg.
  else                          size_factor = 0.5;  // worse than avg.
  return (count() * profit() * size_factor);
}

bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
  assert(this != that, "compare only different WCIs");
  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
  if (this->heat() > that->heat())   return true;
  if (this->heat() < that->heat())   return false;
  assert(this->heat() == that->heat(), "no NaN heat allowed");
  // Equal heat.  Break the tie some other way.
  if (!this->call() || !that->call())  return (address)this > (address)that;
  return this->call()->_idx > that->call()->_idx;
}

//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
#define UNINIT_NEXT ((WarmCallInfo*)NULL)

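// Insert this WarmCallInfo into the list headed by 'head', which is kept
// sorted by decreasing heat, and return the (possibly new) head.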
WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
  assert(next() == UNINIT_NEXT, "not yet on any list");
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != NULL && next_p->warmer_than(this)) {
    prev_p = next_p;
    next_p = prev_p->next();
  }
  // Install this between prev_p and next_p.
  this->set_next(next_p);
  if (prev_p == NULL)
    head = this;
  else
    prev_p->set_next(this);
  return head;
}

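// Unlink this WarmCallInfo from the list headed by 'head' and return the
// (possibly new) head.  This entry must already be on the list.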
WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
  WarmCallInfo* prev_p = NULL;
  WarmCallInfo* next_p = head;
  while (next_p != this) {
    assert(next_p != NULL, "this must be in the list somewhere");
    prev_p = next_p;
    next_p = prev_p->next();
  }
  next_p = this->next();
  debug_only(this->set_next(UNINIT_NEXT));
  // Remove this from between prev_p and next_p.
  if (prev_p == NULL)
    head = next_p;
  else
    prev_p->set_next(next_p);
  return head;
}

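// Sentinel instances used for unconditional decisions; the asserts in the
// accessors below check that they classify as expected.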
WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());

WarmCallInfo* WarmCallInfo::always_hot() {
  assert(_always_hot.is_hot(), "must always be hot");
  return &_always_hot;
}

WarmCallInfo* WarmCallInfo::always_cold() {
  assert(_always_cold.is_cold(), "must always be cold");
  return &_always_cold;
}


#ifndef PRODUCT

void WarmCallInfo::print() const {
  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
             count(), profit(), work(), size(), compute_heat(), next());
  tty->cr();
  if (call() != NULL)  call()->dump();
}

void print_wci(WarmCallInfo* ci) {
  ci->print();
}

void WarmCallInfo::print_all() const {
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    p->print();
}

int WarmCallInfo::count_all() const {
  int cnt = 0;
  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
    cnt++;
  return cnt;
}

#endif //PRODUCT
--- EOF ---