15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "asm/macroAssembler.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "ci/ciReplay.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "code/exceptionHandlerTable.hpp"
30 #include "code/nmethod.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "compiler/disassembler.hpp"
34 #include "compiler/oopMap.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "opto/addnode.hpp"
37 #include "opto/block.hpp"
38 #include "opto/c2compiler.hpp"
39 #include "opto/callGenerator.hpp"
40 #include "opto/callnode.hpp"
41 #include "opto/castnode.hpp"
42 #include "opto/cfgnode.hpp"
43 #include "opto/chaitin.hpp"
44 #include "opto/compile.hpp"
45 #include "opto/connode.hpp"
46 #include "opto/convertnode.hpp"
47 #include "opto/divnode.hpp"
48 #include "opto/escape.hpp"
49 #include "opto/idealGraphPrinter.hpp"
50 #include "opto/loopnode.hpp"
51 #include "opto/machnode.hpp"
52 #include "opto/macro.hpp"
53 #include "opto/matcher.hpp"
|
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "asm/macroAssembler.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "ci/ciReplay.hpp"
28 #include "classfile/systemDictionary.hpp"
29 #include "code/exceptionHandlerTable.hpp"
30 #include "code/nmethod.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "compiler/compileLog.hpp"
33 #include "compiler/disassembler.hpp"
34 #include "compiler/oopMap.hpp"
35 #include "gc/shared/barrierSet.hpp"
36 #include "gc/shared/c2/barrierSetC2.hpp"
37 #include "memory/resourceArea.hpp"
38 #include "opto/addnode.hpp"
39 #include "opto/block.hpp"
40 #include "opto/c2compiler.hpp"
41 #include "opto/callGenerator.hpp"
42 #include "opto/callnode.hpp"
43 #include "opto/castnode.hpp"
44 #include "opto/cfgnode.hpp"
45 #include "opto/chaitin.hpp"
46 #include "opto/compile.hpp"
47 #include "opto/connode.hpp"
48 #include "opto/convertnode.hpp"
49 #include "opto/divnode.hpp"
50 #include "opto/escape.hpp"
51 #include "opto/idealGraphPrinter.hpp"
52 #include "opto/loopnode.hpp"
53 #include "opto/machnode.hpp"
54 #include "opto/macro.hpp"
55 #include "opto/matcher.hpp"
|
396 for (int i = range_check_cast_count() - 1; i >= 0; i--) {
397 Node* cast = range_check_cast_node(i);
398 if (!useful.member(cast)) {
399 remove_range_check_cast(cast);
400 }
401 }
402 // Remove useless expensive nodes
403 for (int i = C->expensive_count()-1; i >= 0; i--) {
404 Node* n = C->expensive_node(i);
405 if (!useful.member(n)) {
406 remove_expensive_node(n);
407 }
408 }
409 // Remove useless Opaque4 nodes
410 for (int i = opaque4_count() - 1; i >= 0; i--) {
411 Node* opaq = opaque4_node(i);
412 if (!useful.member(opaq)) {
413 remove_opaque4_node(opaq);
414 }
415 }
416 // clean up the late inline lists
417 remove_useless_late_inlines(&_string_late_inlines, useful);
418 remove_useless_late_inlines(&_boxing_late_inlines, useful);
419 remove_useless_late_inlines(&_late_inlines, useful);
420 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
421 }
422
423 //------------------------------frame_size_in_words-----------------------------
424 // frame_slots in units of words
425 int Compile::frame_size_in_words() const {  // convert _frame_slots (int-sized stack slots) to machine words
426 // shift is 0 in LP32 and 1 in LP64
427 const int shift = (LogBytesPerWord - LogBytesPerInt);  // by construction a slot is int-sized, so this is slots-per-word as a log2
428 int words = _frame_slots >> shift;  // divide slot count by slots-per-word
429 assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );  // round-trip check: slot count must be an exact multiple of slots-per-word
430 return words;
431 }
432
433 // To bang the stack of this compiled method we use the stack size
434 // that the interpreter would need in case of a deoptimization. This
|
398 for (int i = range_check_cast_count() - 1; i >= 0; i--) {
399 Node* cast = range_check_cast_node(i);
400 if (!useful.member(cast)) {
401 remove_range_check_cast(cast);
402 }
403 }
404 // Remove useless expensive nodes
405 for (int i = C->expensive_count()-1; i >= 0; i--) {
406 Node* n = C->expensive_node(i);
407 if (!useful.member(n)) {
408 remove_expensive_node(n);
409 }
410 }
411 // Remove useless Opaque4 nodes
412 for (int i = opaque4_count() - 1; i >= 0; i--) {
413 Node* opaq = opaque4_node(i);
414 if (!useful.member(opaq)) {
415 remove_opaque4_node(opaq);
416 }
417 }
418 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
419 bs->eliminate_useless_gc_barriers(useful);
420 // clean up the late inline lists
421 remove_useless_late_inlines(&_string_late_inlines, useful);
422 remove_useless_late_inlines(&_boxing_late_inlines, useful);
423 remove_useless_late_inlines(&_late_inlines, useful);
424 debug_only(verify_graph_edges(true/*check for no_dead_code*/);)
425 }
426
427 //------------------------------frame_size_in_words-----------------------------
428 // frame_slots in units of words
429 int Compile::frame_size_in_words() const {  // convert _frame_slots (int-sized stack slots) to machine words
430 // shift is 0 in LP32 and 1 in LP64
431 const int shift = (LogBytesPerWord - LogBytesPerInt);  // by construction a slot is int-sized, so this is slots-per-word as a log2
432 int words = _frame_slots >> shift;  // divide slot count by slots-per-word
433 assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" );  // round-trip check: slot count must be an exact multiple of slots-per-word
434 return words;
435 }
436
437 // To bang the stack of this compiled method we use the stack size
438 // that the interpreter would need in case of a deoptimization. This
|
619 // ============================================================================
620 //------------------------------Compile standard-------------------------------
621 debug_only( int Compile::_debug_idx = 100000; )
622
623 // Compile a method. entry_bci is -1 for normal compilations and indicates
624 // the continuation bci for on stack replacement.
625
626
627 Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
628 bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing, DirectiveSet* directive)
629 : Phase(Compiler),
630 _env(ci_env),
631 _directive(directive),
632 _log(ci_env->log()),
633 _compile_id(ci_env->compile_id()),
634 _save_argument_registers(false),
635 _stub_name(NULL),
636 _stub_function(NULL),
637 _stub_entry_point(NULL),
638 _method(target),
639 _entry_bci(osr_bci),
640 _initial_gvn(NULL),
641 _for_igvn(NULL),
642 _warm_calls(NULL),
643 _subsume_loads(subsume_loads),
644 _do_escape_analysis(do_escape_analysis),
645 _eliminate_boxing(eliminate_boxing),
646 _failure_reason(NULL),
647 _code_buffer("Compile::Fill_buffer"),
648 _orig_pc_slot(0),
649 _orig_pc_slot_offset_in_bytes(0),
650 _has_method_handle_invokes(false),
651 _mach_constant_base_node(NULL),
652 _node_bundling_limit(0),
653 _node_bundling_base(NULL),
654 _java_calls(0),
655 _inner_loops(0),
656 _scratch_const_size(-1),
657 _in_scratch_emit_size(false),
|
623 // ============================================================================
624 //------------------------------Compile standard-------------------------------
625 debug_only( int Compile::_debug_idx = 100000; )
626
627 // Compile a method. entry_bci is -1 for normal compilations and indicates
628 // the continuation bci for on stack replacement.
629
630
631 Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
632 bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing, DirectiveSet* directive)
633 : Phase(Compiler),
634 _env(ci_env),
635 _directive(directive),
636 _log(ci_env->log()),
637 _compile_id(ci_env->compile_id()),
638 _save_argument_registers(false),
639 _stub_name(NULL),
640 _stub_function(NULL),
641 _stub_entry_point(NULL),
642 _method(target),
643 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
644 _entry_bci(osr_bci),
645 _initial_gvn(NULL),
646 _for_igvn(NULL),
647 _warm_calls(NULL),
648 _subsume_loads(subsume_loads),
649 _do_escape_analysis(do_escape_analysis),
650 _eliminate_boxing(eliminate_boxing),
651 _failure_reason(NULL),
652 _code_buffer("Compile::Fill_buffer"),
653 _orig_pc_slot(0),
654 _orig_pc_slot_offset_in_bytes(0),
655 _has_method_handle_invokes(false),
656 _mach_constant_base_node(NULL),
657 _node_bundling_limit(0),
658 _node_bundling_base(NULL),
659 _java_calls(0),
660 _inner_loops(0),
661 _scratch_const_size(-1),
662 _in_scratch_emit_size(false),
|
754
755 // Put top into the hash table ASAP.
756 initial_gvn()->transform_no_reclaim(top());
757
758 // Set up tf(), start(), and find a CallGenerator.
759 CallGenerator* cg = NULL;
760 if (is_osr_compilation()) {
761 const TypeTuple *domain = StartOSRNode::osr_domain();
762 const TypeTuple *range = TypeTuple::make_range(method()->signature());
763 init_tf(TypeFunc::make(domain, range));
764 StartNode* s = new StartOSRNode(root(), domain);
765 initial_gvn()->set_type_bottom(s);
766 init_start(s);
767 cg = CallGenerator::for_osr(method(), entry_bci());
768 } else {
769 // Normal case.
770 init_tf(TypeFunc::make(method()));
771 StartNode* s = new StartNode(root(), tf()->domain());
772 initial_gvn()->set_type_bottom(s);
773 init_start(s);
774 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) {
775 // With java.lang.ref.reference.get() we must go through the
776 // intrinsic when G1 is enabled - even when get() is the root
777 // method of the compile - so that, if necessary, the value in
778 // the referent field of the reference object gets recorded by
779 // the pre-barrier code.
780 // Specifically, if G1 is enabled, the value in the referent
781 // field is recorded by the G1 SATB pre barrier. This will
782 // result in the referent being marked live and the reference
783 // object removed from the list of discovered references during
784 // reference processing.
785 cg = find_intrinsic(method(), false);
786 }
787 if (cg == NULL) {
788 float past_uses = method()->interpreter_invocation_count();
789 float expected_uses = past_uses;
790 cg = CallGenerator::for_inline(method(), expected_uses);
791 }
792 }
793 if (failing()) return;
794 if (cg == NULL) {
795 record_method_not_compilable("cannot parse method");
796 return;
797 }
798 JVMState* jvms = build_start_state(start(), tf());
799 if ((jvms = cg->generate(jvms)) == NULL) {
800 if (!failure_reason_is(C2Compiler::retry_class_loading_during_parsing())) {
801 record_method_not_compilable("method parse failed");
802 }
803 return;
|
759
760 // Put top into the hash table ASAP.
761 initial_gvn()->transform_no_reclaim(top());
762
763 // Set up tf(), start(), and find a CallGenerator.
764 CallGenerator* cg = NULL;
765 if (is_osr_compilation()) {
766 const TypeTuple *domain = StartOSRNode::osr_domain();
767 const TypeTuple *range = TypeTuple::make_range(method()->signature());
768 init_tf(TypeFunc::make(domain, range));
769 StartNode* s = new StartOSRNode(root(), domain);
770 initial_gvn()->set_type_bottom(s);
771 init_start(s);
772 cg = CallGenerator::for_osr(method(), entry_bci());
773 } else {
774 // Normal case.
775 init_tf(TypeFunc::make(method()));
776 StartNode* s = new StartNode(root(), tf()->domain());
777 initial_gvn()->set_type_bottom(s);
778 init_start(s);
779 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
780 // With java.lang.ref.reference.get() we must go through the
781 // intrinsic - even when get() is the root
782 // method of the compile - so that, if necessary, the value in
783 // the referent field of the reference object gets recorded by
784 // the pre-barrier code.
785 cg = find_intrinsic(method(), false);
786 }
787 if (cg == NULL) {
788 float past_uses = method()->interpreter_invocation_count();
789 float expected_uses = past_uses;
790 cg = CallGenerator::for_inline(method(), expected_uses);
791 }
792 }
793 if (failing()) return;
794 if (cg == NULL) {
795 record_method_not_compilable("cannot parse method");
796 return;
797 }
798 JVMState* jvms = build_start_state(start(), tf());
799 if ((jvms = cg->generate(jvms)) == NULL) {
800 if (!failure_reason_is(C2Compiler::retry_class_loading_during_parsing())) {
801 record_method_not_compilable("method parse failed");
802 }
803 return;
|
2316 igvn.optimize();
2317 }
2318
2319 print_method(PHASE_ITER_GVN2, 2);
2320
2321 if (failing()) return;
2322
2323 // Loop transforms on the ideal graph. Range Check Elimination,
2324 // peeling, unrolling, etc.
2325 if(loop_opts_cnt > 0) {
2326 debug_only( int cnt = 0; );
2327 while(major_progress() && (loop_opts_cnt > 0)) {
2328 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2329 assert( cnt++ < 40, "infinite cycle in loop optimization" );
2330 PhaseIdealLoop ideal_loop( igvn, true);
2331 loop_opts_cnt--;
2332 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2333 if (failing()) return;
2334 }
2335 }
2336 // Ensure that major progress is now clear
2337 C->clear_major_progress();
2338
2339 {
2340 // Verify that all previous optimizations produced a valid graph
2341 // at least to this point, even if no loop optimizations were done.
2342 TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2343 PhaseIdealLoop::verify(igvn);
2344 }
2345
2346 if (range_check_cast_count() > 0) {
2347 // No more loop optimizations. Remove all range check dependent CastIINodes.
2348 C->remove_range_check_casts(igvn);
2349 igvn.optimize();
2350 }
2351
2352 {
2353 TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2354 PhaseMacroExpand mex(igvn);
2355 if (mex.expand_macro_nodes()) {
2356 assert(failing(), "must bail out w/ explicit message");
2357 return;
2358 }
2359 }
2360
2361 if (opaque4_count() > 0) {
2362 C->remove_opaque4_nodes(igvn);
2363 igvn.optimize();
2364 }
2365
2366 DEBUG_ONLY( _modified_nodes = NULL; )
2367 } // (End scope of igvn; run destructor if necessary for asserts.)
2368
2369 process_print_inlining();
|
2316 igvn.optimize();
2317 }
2318
2319 print_method(PHASE_ITER_GVN2, 2);
2320
2321 if (failing()) return;
2322
2323 // Loop transforms on the ideal graph. Range Check Elimination,
2324 // peeling, unrolling, etc.
2325 if(loop_opts_cnt > 0) {
2326 debug_only( int cnt = 0; );
2327 while(major_progress() && (loop_opts_cnt > 0)) {
2328 TracePhase tp("idealLoop", &timers[_t_idealLoop]);
2329 assert( cnt++ < 40, "infinite cycle in loop optimization" );
2330 PhaseIdealLoop ideal_loop( igvn, true);
2331 loop_opts_cnt--;
2332 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2333 if (failing()) return;
2334 }
2335 }
2336
2337 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
2338 bs->find_dominating_barriers(igvn);
2339 if (failing()) return;
2340
2341 // Ensure that major progress is now clear
2342 C->clear_major_progress();
2343
2344 {
2345 // Verify that all previous optimizations produced a valid graph
2346 // at least to this point, even if no loop optimizations were done.
2347 TracePhase tp("idealLoopVerify", &timers[_t_idealLoopVerify]);
2348 PhaseIdealLoop::verify(igvn);
2349 }
2350
2351 if (range_check_cast_count() > 0) {
2352 // No more loop optimizations. Remove all range check dependent CastIINodes.
2353 C->remove_range_check_casts(igvn);
2354 igvn.optimize();
2355 }
2356
2357 #ifdef ASSERT
2358 bs->verify_gc_barriers(false);
2359 #endif
2360
2361 {
2362 TracePhase tp("macroExpand", &timers[_t_macroExpand]);
2363 PhaseMacroExpand mex(igvn);
2364 if (mex.expand_macro_nodes()) {
2365 assert(failing(), "must bail out w/ explicit message");
2366 return;
2367 }
2368 }
2369
2370 if (opaque4_count() > 0) {
2371 C->remove_opaque4_nodes(igvn);
2372 igvn.optimize();
2373 }
2374
2375 DEBUG_ONLY( _modified_nodes = NULL; )
2376 } // (End scope of igvn; run destructor if necessary for asserts.)
2377
2378 process_print_inlining();
|