
src/hotspot/share/opto/compile.cpp

 530 }
 531 
 532 
 533 //-----------------------init_scratch_buffer_blob------------------------------
 534 // Construct a temporary BufferBlob and cache it for this compile.
 535 void Compile::init_scratch_buffer_blob(int const_size) {
 536   // If there is already a scratch buffer blob allocated and the
 537   // constant section is big enough, use it.  Otherwise free the
 538   // current and allocate a new one.
 539   BufferBlob* blob = scratch_buffer_blob();
 540   if ((blob != NULL) && (const_size <= _scratch_const_size)) {
 541     // Use the current blob.
 542   } else {
 543     if (blob != NULL) {
 544       BufferBlob::free(blob);
 545     }
 546 
 547     ResourceMark rm;
 548     _scratch_const_size = const_size;
 549     int size = C2Compiler::initial_code_buffer_size(const_size);






 550     blob = BufferBlob::create("Compile::scratch_buffer", size);
 551     // Record the buffer blob for next time.
 552     set_scratch_buffer_blob(blob);
 553     // Have we run out of code space?
 554     if (scratch_buffer_blob() == NULL) {
 555       // Let CompilerBroker disable further compilations.
 556       record_failure("Not enough space for scratch buffer in CodeCache");
 557       return;
 558     }
 559   }
 560 
 561   // Initialize the relocation buffers
 562   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
 563   set_scratch_locs_memory(locs_buf);
 564 }
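
The pointer arithmetic on line 562 above is easy to misread: the blob's content end is cast to relocInfo* first, so subtracting MAX_locs_size steps back in relocInfo units, not bytes, reserving the tail of the blob for relocation records. A minimal sketch of that carve-out, using hypothetical stand-ins (relocInfo_t, MAX_locs) for the HotSpot types and constants not defined in this excerpt:

    #include <cstdint>

    // Hypothetical stand-ins; in HotSpot, relocInfo wraps a 16-bit unit
    // and MAX_locs_size is a constant. Neither is defined in this excerpt.
    typedef uint16_t relocInfo_t;
    static const int MAX_locs = 4096;

    // Reserve the last MAX_locs relocation units of a blob whose usable
    // memory ends at content_end. The cast happens before the subtraction,
    // so the pointer steps back in relocInfo units rather than bytes.
    static relocInfo_t* carve_locs(char* content_end) {
      return reinterpret_cast<relocInfo_t*>(content_end) - MAX_locs;
    }
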
 565 
 566 
 567 //-----------------------scratch_emit_size-------------------------------------
 568 // Helper function that computes size by emitting code
 569 uint Compile::scratch_emit_size(const Node* n) {


 594   int lsize = MAX_locs_size / 3;
 595   buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
 596   buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
 597   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
 598   // Mark as scratch buffer.
 599   buf.consts()->set_scratch_emit();
 600   buf.insts()->set_scratch_emit();
 601   buf.stubs()->set_scratch_emit();
 602 
 603   // Do the emission.
 604 
 605   Label fakeL; // Fake label for branch instructions.
 606   Label*   saveL = NULL;
 607   uint save_bnum = 0;
 608   bool is_branch = n->is_MachBranch();
 609   if (is_branch) {
 610     MacroAssembler masm(&buf);
 611     masm.bind(fakeL);
 612     n->as_MachBranch()->save_label(&saveL, &save_bnum);
 613     n->as_MachBranch()->label_set(&fakeL, 0);



 614   }
 615   n->emit(buf, this->regalloc());
 616 
 617   // Emitting into the scratch buffer should not fail
 618   assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
 619 
 620   // Restore label.
 621   if (is_branch) {
 622     n->as_MachBranch()->label_set(saveL, save_bnum);


 623   }
 624 
 625   // End scratch_emit_size section.
 626   set_in_scratch_emit_size(false);
 627 
 628   return buf.insts_size();
 629 }
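
The MAX_locs_size / 3 arithmetic near the top of this function hands each of the three code sections (consts, insts, stubs) an equal share of the scratch relocation area reserved by init_scratch_buffer_blob. A hedged sketch of that split, with a hypothetical relocInfo_t standing in for relocInfo:

    #include <cstdint>

    typedef uint16_t relocInfo_t;   // hypothetical stand-in for relocInfo

    struct SectionLocs { relocInfo_t* start; int limit; };

    // Divide one shared relocation buffer into equal thirds, mirroring
    // the lsize arithmetic above. Integer division may leave up to two
    // trailing units unused, which is harmless for a scratch pass.
    static void split_locs(relocInfo_t* buf, int total, SectionLocs out[3]) {
      int lsize = total / 3;
      for (int i = 0; i < 3; i++) {
        out[i].start = &buf[lsize * i];
        out[i].limit = lsize;
      }
    }
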
 630 
 631 
 632 // ============================================================================
 633 //------------------------------Compile standard-------------------------------
 634 debug_only( int Compile::_debug_idx = 100000; )
 635 
 636 // Compile a method.  entry_bci is -1 for normal compilations;
 637 // otherwise it is the continuation bci for on-stack replacement.
 638 
 639 
 640 Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
 641                   bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing, DirectiveSet* directive)
 642                 : Phase(Compiler),
 643                   _compile_id(ci_env->compile_id()),
 644                   _save_argument_registers(false),
 645                   _subsume_loads(subsume_loads),
 646                   _do_escape_analysis(do_escape_analysis),
 647                   _eliminate_boxing(eliminate_boxing),
 648                   _method(target),
 649                   _entry_bci(osr_bci),
 650                   _stub_function(NULL),
 651                   _stub_name(NULL),
 652                   _stub_entry_point(NULL),
 653                   _max_node_limit(MaxNodeLimit),
 654                   _orig_pc_slot(0),
 655                   _orig_pc_slot_offset_in_bytes(0),


 656                   _inlining_progress(false),
 657                   _inlining_incrementally(false),
 658                   _has_reserved_stack_access(target->has_reserved_stack_access()),
 659 #ifndef PRODUCT
 660                   _trace_opto_output(directive->TraceOptoOutputOption),
 661 #endif
 662                   _has_method_handle_invokes(false),
 663                   _comp_arena(mtCompiler),
 664                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 665                   _env(ci_env),
 666                   _directive(directive),
 667                   _log(ci_env->log()),
 668                   _failure_reason(NULL),
 669                   _congraph(NULL),
 670 #ifndef PRODUCT
 671                   _printer(IdealGraphPrinter::printer()),
 672 #endif
 673                   _dead_node_list(comp_arena()),
 674                   _dead_node_count(0),
 675                   _node_arena(mtCompiler),


 897 #endif
 898 
 899 #ifdef ASSERT
 900   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 901   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 902 #endif
 903 
 904   // Dump compilation data to replay it.
 905   if (directive->DumpReplayOption) {
 906     env()->dump_replay_data(_compile_id);
 907   }
 908   if (directive->DumpInlineOption && (ilt() != NULL)) {
 909     env()->dump_inline_data(_compile_id);
 910   }
 911 
 912   // Now that we know the size of all the monitors, we can add a fixed slot
 913   // for the original deopt pc.
 914 
 915   _orig_pc_slot =  fixed_slots();
 916   int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);







 917   set_fixed_slots(next_slot);
 918 
 919   // Compute when to use implicit null checks. Used by matching trap based
 920   // nodes and NullCheck optimization.
 921   set_allowed_deopt_reasons();
 922 
 923   // Now generate code
 924   Code_Gen();
 925   if (failing())  return;
 926 
 927   // Check if we want to skip execution of all compiled code.
 928   {
 929 #ifndef PRODUCT
 930     if (OptoNoExecute) {
 931       record_method_not_compilable("+OptoNoExecute");  // Flag as failed
 932       return;
 933     }
 934 #endif
 935     TracePhase tp("install_code", &timers[_t_registerMethod]);
 936 
 937     if (is_osr_compilation()) {
 938       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
 939       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
 940     } else {
 941       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);







 942       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
 943     }
 944 
 945     env()->register_method(_method, _entry_bci,
 946                            &_code_offsets,
 947                            _orig_pc_slot_offset_in_bytes,
 948                            code_buffer(),
 949                            frame_size_in_words(), _oop_map_set,
 950                            &_handler_table, &_inc_table,
 951                            compiler,
 952                            has_unsafe_access(),
 953                            SharedRuntime::is_wide_vector(max_vector_size()),
 954                            rtm_state()
 955                            );
 956 
 957     if (log() != NULL) // Print code cache state into compiler log
 958       log()->code_cache_state();
 959   }
 960 }
 961 


 967                   const char *stub_name,
 968                   int is_fancy_jump,
 969                   bool pass_tls,
 970                   bool save_arg_registers,
 971                   bool return_pc,
 972                   DirectiveSet* directive)
 973   : Phase(Compiler),
 974     _compile_id(0),
 975     _save_argument_registers(save_arg_registers),
 976     _subsume_loads(true),
 977     _do_escape_analysis(false),
 978     _eliminate_boxing(false),
 979     _method(NULL),
 980     _entry_bci(InvocationEntryBci),
 981     _stub_function(stub_function),
 982     _stub_name(stub_name),
 983     _stub_entry_point(NULL),
 984     _max_node_limit(MaxNodeLimit),
 985     _orig_pc_slot(0),
 986     _orig_pc_slot_offset_in_bytes(0),


 987     _inlining_progress(false),
 988     _inlining_incrementally(false),
 989     _has_reserved_stack_access(false),
 990 #ifndef PRODUCT
 991     _trace_opto_output(directive->TraceOptoOutputOption),
 992 #endif
 993     _has_method_handle_invokes(false),
 994     _comp_arena(mtCompiler),
 995     _env(ci_env),
 996     _directive(directive),
 997     _log(ci_env->log()),
 998     _failure_reason(NULL),
 999     _congraph(NULL),
1000 #ifndef PRODUCT
1001     _printer(NULL),
1002 #endif
1003     _dead_node_list(comp_arena()),
1004     _dead_node_count(0),
1005     _node_arena(mtCompiler),
1006     _old_arena(mtCompiler),




 530 }
 531 
 532 
 533 //-----------------------init_scratch_buffer_blob------------------------------
 534 // Construct a temporary BufferBlob and cache it for this compile.
 535 void Compile::init_scratch_buffer_blob(int const_size) {
 536   // If there is already a scratch buffer blob allocated and the
 537   // constant section is big enough, use it.  Otherwise free the
 538   // current and allocate a new one.
 539   BufferBlob* blob = scratch_buffer_blob();
 540   if ((blob != NULL) && (const_size <= _scratch_const_size)) {
 541     // Use the current blob.
 542   } else {
 543     if (blob != NULL) {
 544       BufferBlob::free(blob);
 545     }
 546 
 547     ResourceMark rm;
 548     _scratch_const_size = const_size;
 549     int size = C2Compiler::initial_code_buffer_size(const_size);
 550 #ifdef ASSERT
 551     if (C->has_scalarized_args()) {
 552       // Oop verification for loading object fields from scalarized value types in the new entry point requires lots of space
 553       size += 5120;
 554     }
 555 #endif
 556     blob = BufferBlob::create("Compile::scratch_buffer", size);
 557     // Record the buffer blob for next time.
 558     set_scratch_buffer_blob(blob);
 559     // Have we run out of code space?
 560     if (scratch_buffer_blob() == NULL) {
 561       // Let CompileBroker disable further compilations.
 562       record_failure("Not enough space for scratch buffer in CodeCache");
 563       return;
 564     }
 565   }
 566 
 567   // Initialize the relocation buffers
 568   relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size;
 569   set_scratch_locs_memory(locs_buf);
 570 }
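
This updated version keeps the same grow-only caching scheme: the blob is reused across nodes until a larger constant section forces a reallocation, and the debug-only hunk at lines 550-555 now pads the request by 5120 bytes for methods with scalarized arguments, since the oop-verification code emitted at the extra entry point needs the room. A minimal sketch of the caching idiom, with plain globals standing in for the scratch-blob accessors (not the HotSpot API):

    #include <cstdlib>

    static char* g_blob     = nullptr;  // stands in for scratch_buffer_blob()
    static int   g_capacity = 0;        // stands in for _scratch_const_size

    // Reuse the cached buffer while it is big enough; otherwise free it
    // and allocate a larger one, remembering the new capacity for next time.
    static char* get_scratch_blob(int needed) {
      if (g_blob != nullptr && needed <= g_capacity) {
        return g_blob;                  // current buffer still fits
      }
      std::free(g_blob);                // release the undersized buffer
      g_blob = static_cast<char*>(std::malloc(needed));
      g_capacity = (g_blob != nullptr) ? needed : 0;
      return g_blob;                    // may be NULL, like BufferBlob::create
    }
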
 571 
 572 
 573 //-----------------------scratch_emit_size-------------------------------------
 574 // Helper function that computes size by emitting code
 575 uint Compile::scratch_emit_size(const Node* n) {


 600   int lsize = MAX_locs_size / 3;
 601   buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
 602   buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
 603   buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
 604   // Mark as scratch buffer.
 605   buf.consts()->set_scratch_emit();
 606   buf.insts()->set_scratch_emit();
 607   buf.stubs()->set_scratch_emit();
 608 
 609   // Do the emission.
 610 
 611   Label fakeL; // Fake label for branch instructions.
 612   Label*   saveL = NULL;
 613   uint save_bnum = 0;
 614   bool is_branch = n->is_MachBranch();
 615   if (is_branch) {
 616     MacroAssembler masm(&buf);
 617     masm.bind(fakeL);
 618     n->as_MachBranch()->save_label(&saveL, &save_bnum);
 619     n->as_MachBranch()->label_set(&fakeL, 0);
 620   } else if (n->is_MachProlog()) {
 621     saveL = ((MachPrologNode*)n)->_verified_entry;
 622     ((MachPrologNode*)n)->_verified_entry = &fakeL;
 623   }
 624   n->emit(buf, this->regalloc());
 625 
 626   // Emitting into the scratch buffer should not fail
 627   assert (!failing(), "Must not have pending failure. Reason is: %s", failure_reason());
 628 
 629   // Restore label.
 630   if (is_branch) {
 631     n->as_MachBranch()->label_set(saveL, save_bnum);
 632   } else if (n->is_MachProlog()) {
 633     ((MachPrologNode*)n)->_verified_entry = saveL;
 634   }
 635 
 636   // End scratch_emit_size section.
 637   set_in_scratch_emit_size(false);
 638 
 639   return buf.insts_size();
 640 }
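
Scratch emission runs before final code layout, so a branch target, and now a prolog's verified-entry label, may not be bound yet; the function therefore binds a local fakeL, swaps it into the node for the measuring emit, and restores the original label afterwards. The new else-if arms extend this idiom from MachBranch to MachPrologNode::_verified_entry. A minimal sketch of the save/patch/restore pattern, with hypothetical stand-in types rather than the real Mach node API:

    struct Label { bool bound = false; };

    struct Node {
      Label* target;                 // emission needs a bound label here
    };

    // Measure a node's encoding without disturbing its real label:
    // save it, patch in a label that is already bound, emit, restore.
    static int measure(Node* n, Label* bound_fake) {
      Label* saved = n->target;      // remember the real, possibly unbound label
      n->target = bound_fake;        // point the node at the scratch label
      int size = 4;                  // pretend-emit; a real emitter counts bytes
      n->target = saved;             // restore before the real emission pass
      return size;
    }
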
 641 
 642 
 643 // ============================================================================
 644 //------------------------------Compile standard-------------------------------
 645 debug_only( int Compile::_debug_idx = 100000; )
 646 
 647 // Compile a method.  entry_bci is -1 for normal compilations;
 648 // otherwise it is the continuation bci for on-stack replacement.
 649 
 650 
 651 Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci,
 652                   bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing, DirectiveSet* directive)
 653                 : Phase(Compiler),
 654                   _compile_id(ci_env->compile_id()),
 655                   _save_argument_registers(false),
 656                   _subsume_loads(subsume_loads),
 657                   _do_escape_analysis(do_escape_analysis),
 658                   _eliminate_boxing(eliminate_boxing),
 659                   _method(target),
 660                   _entry_bci(osr_bci),
 661                   _stub_function(NULL),
 662                   _stub_name(NULL),
 663                   _stub_entry_point(NULL),
 664                   _max_node_limit(MaxNodeLimit),
 665                   _orig_pc_slot(0),
 666                   _orig_pc_slot_offset_in_bytes(0),
 667                   _sp_inc_slot(0),
 668                   _sp_inc_slot_offset_in_bytes(0),
 669                   _inlining_progress(false),
 670                   _inlining_incrementally(false),
 671                   _has_reserved_stack_access(target->has_reserved_stack_access()),
 672 #ifndef PRODUCT
 673                   _trace_opto_output(directive->TraceOptoOutputOption),
 674 #endif
 675                   _has_method_handle_invokes(false),
 676                   _comp_arena(mtCompiler),
 677                   _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
 678                   _env(ci_env),
 679                   _directive(directive),
 680                   _log(ci_env->log()),
 681                   _failure_reason(NULL),
 682                   _congraph(NULL),
 683 #ifndef PRODUCT
 684                   _printer(IdealGraphPrinter::printer()),
 685 #endif
 686                   _dead_node_list(comp_arena()),
 687                   _dead_node_count(0),
 688                   _node_arena(mtCompiler),


 910 #endif
 911 
 912 #ifdef ASSERT
 913   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 914   bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
 915 #endif
 916 
 917   // Dump compilation data to replay it.
 918   if (directive->DumpReplayOption) {
 919     env()->dump_replay_data(_compile_id);
 920   }
 921   if (directive->DumpInlineOption && (ilt() != NULL)) {
 922     env()->dump_inline_data(_compile_id);
 923   }
 924 
 925   // Now that we know the size of all the monitors, we can add a fixed slot
 926   // for the original deopt pc.
 927 
 928   _orig_pc_slot = fixed_slots();
 929   int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
 930 
 931   if (method()->get_Method()->needs_stack_repair()) {
 932     // One extra slot for the special stack increment value
 933     _sp_inc_slot = next_slot;
 934     next_slot += 2;
 935   }
 936 
 937   set_fixed_slots(next_slot);
 938 
 939   // Compute when to use implicit null checks. Used by matching trap based
 940   // nodes and NullCheck optimization.
 941   set_allowed_deopt_reasons();
 942 
 943   // Now generate code
 944   Code_Gen();
 945   if (failing())  return;
 946 
 947   // Check if we want to skip execution of all compiled code.
 948   {
 949 #ifndef PRODUCT
 950     if (OptoNoExecute) {
 951       record_method_not_compilable("+OptoNoExecute");  // Flag as failed
 952       return;
 953     }
 954 #endif
 955     TracePhase tp("install_code", &timers[_t_registerMethod]);
 956 
 957     if (is_osr_compilation()) {
 958       _code_offsets.set_value(CodeOffsets::Verified_Entry, 0);
 959       _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size);
 960     } else {
 961       _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size);
 962       if (_code_offsets.value(CodeOffsets::Verified_Value_Entry) == -1) {
 963         _code_offsets.set_value(CodeOffsets::Verified_Value_Entry, _first_block_size);
 964       }
 965       if (_code_offsets.value(CodeOffsets::Entry) == -1) {
 966         // We emitted a value type entry point; adjust the normal entry
 967         _code_offsets.set_value(CodeOffsets::Entry, _first_block_size);
 968       }
 969       _code_offsets.set_value(CodeOffsets::OSR_Entry, 0);
 970     }
 971 
 972     env()->register_method(_method, _entry_bci,
 973                            &_code_offsets,
 974                            _orig_pc_slot_offset_in_bytes,
 975                            code_buffer(),
 976                            frame_size_in_words(), _oop_map_set,
 977                            &_handler_table, &_inc_table,
 978                            compiler,
 979                            has_unsafe_access(),
 980                            SharedRuntime::is_wide_vector(max_vector_size()),
 981                            rtm_state()
 982                            );
 983 
 984     if (log() != NULL) // Print code cache state into compiler log
 985       log()->code_cache_state();
 986   }
 987 }
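
The fixed-slot bookkeeping at lines 928-937 is plain arithmetic: the saved deopt pc needs sizeof(address) / VMRegImpl::stack_slot_size slots, and methods that need stack repair reserve two more for the stack-increment value. A worked example under the assumption of an LP64 build (4-byte stack slots, 8-byte addresses); the starting slot count is made up for illustration:

    #include <cstdio>

    int main() {
      const int stack_slot_size = 4;  // VMRegImpl::stack_slot_size (assumed)
      const int address_size    = 8;  // sizeof(address) on LP64 (assumed)
      int orig_pc_slot = 6;           // hypothetical fixed_slots() result
      int next_slot = orig_pc_slot + address_size / stack_slot_size;  // 6 + 2 = 8
      bool needs_stack_repair = true; // hypothetical
      if (needs_stack_repair) {
        next_slot += 2;               // two extra slots for the stack increment
      }
      std::printf("fixed slots now end at %d\n", next_slot);  // prints 10
      return 0;
    }
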
 988 


 994                   const char *stub_name,
 995                   int is_fancy_jump,
 996                   bool pass_tls,
 997                   bool save_arg_registers,
 998                   bool return_pc,
 999                   DirectiveSet* directive)
1000   : Phase(Compiler),
1001     _compile_id(0),
1002     _save_argument_registers(save_arg_registers),
1003     _subsume_loads(true),
1004     _do_escape_analysis(false),
1005     _eliminate_boxing(false),
1006     _method(NULL),
1007     _entry_bci(InvocationEntryBci),
1008     _stub_function(stub_function),
1009     _stub_name(stub_name),
1010     _stub_entry_point(NULL),
1011     _max_node_limit(MaxNodeLimit),
1012     _orig_pc_slot(0),
1013     _orig_pc_slot_offset_in_bytes(0),
1014     _sp_inc_slot(0),
1015     _sp_inc_slot_offset_in_bytes(0),
1016     _inlining_progress(false),
1017     _inlining_incrementally(false),
1018     _has_reserved_stack_access(false),
1019 #ifndef PRODUCT
1020     _trace_opto_output(directive->TraceOptoOutputOption),
1021 #endif
1022     _has_method_handle_invokes(false),
1023     _comp_arena(mtCompiler),
1024     _env(ci_env),
1025     _directive(directive),
1026     _log(ci_env->log()),
1027     _failure_reason(NULL),
1028     _congraph(NULL),
1029 #ifndef PRODUCT
1030     _printer(NULL),
1031 #endif
1032     _dead_node_list(comp_arena()),
1033     _dead_node_count(0),
1034     _node_arena(mtCompiler),
1035     _old_arena(mtCompiler),

