25 #include "precompiled.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "code/compiledIC.hpp"
28 #include "code/debugInfo.hpp"
29 #include "code/debugInfoRec.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "memory/allocation.inline.hpp"
33 #include "opto/callnode.hpp"
34 #include "opto/cfgnode.hpp"
35 #include "opto/locknode.hpp"
36 #include "opto/machnode.hpp"
37 #include "opto/output.hpp"
38 #include "opto/regalloc.hpp"
39 #include "opto/runtime.hpp"
40 #include "opto/subnode.hpp"
41 #include "opto/type.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "utilities/xmlstream.hpp"
44
// Platform-specific queries for the worst-case code size of the exception
// and deoptimization handler stubs, plus the emitters that generate them
// into a CodeBuffer. NOTE(review): implemented per-architecture — the newer
// revision of this file replaces these free functions with HandlerImpl::
// statics declared in the platform *.ad files; confirm against that change.
45 extern uint size_exception_handler();
46 extern uint size_deopt_handler();
47

// DEBUG_ARG(x) expands to ", x" in non-product builds and to nothing in
// product builds, letting callers conditionally append debug-only trailing
// arguments to a call without #ifdef-ing every call site.
48 #ifndef PRODUCT
49 #define DEBUG_ARG(x) , x
50 #else
51 #define DEBUG_ARG(x)
52 #endif
53

// Emitters for the handler stubs sized above; each appends its stub to the
// given CodeBuffer and returns the stub's code offset.
54 extern int emit_exception_handler(CodeBuffer &cbuf);
55 extern int emit_deopt_handler(CodeBuffer &cbuf);
56
57 // Convert Nodes to instruction bits and pass off to the VM
58 void Compile::Output() {
59 // RootNode goes
60 assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
61
62 // The number of new nodes (mostly MachNop) is proportional to
63 // the number of java calls and inner loops which are aligned.
64 if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
65 C->inner_loops()*(OptoLoopAlignment-1)),
66 "out of nodes before code generation" ) ) {
67 return;
68 }
69 // Make sure I can find the Start Node
70 Block *entry = _cfg->get_block(1);
71 Block *broot = _cfg->get_root_block();
72
73 const StartNode *start = entry->head()->as_Start();
74
75 // Replace StartNode with prolog
76 MachPrologNode *prolog = new (this) MachPrologNode();
377 // offset of jump in jmp_offset, rather than the absolute offset of jump.
378 // This is so that we do not need to recompute sizes of all nodes when
379 // we compute correct blk_starts in our next sizing pass.
380 jmp_offset[i] = 0;
381 jmp_size[i] = 0;
382 jmp_nidx[i] = -1;
383 DEBUG_ONLY( jmp_target[i] = 0; )
384 DEBUG_ONLY( jmp_rule[i] = 0; )
385
386 // Sum all instruction sizes to compute block size
387 uint last_inst = block->number_of_nodes();
388 uint blk_size = 0;
389 for (uint j = 0; j < last_inst; j++) {
390 Node* nj = block->get_node(j);
391 // Handle machine instruction nodes
392 if (nj->is_Mach()) {
393 MachNode *mach = nj->as_Mach();
394 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
395 reloc_size += mach->reloc();
396 if (mach->is_MachCall()) {
397 MachCallNode *mcall = mach->as_MachCall();
398 // This destination address is NOT PC-relative
399
400 mcall->method_set((intptr_t)mcall->entry_point());
401
402 if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
403 stub_size += CompiledStaticCall::to_interp_stub_size();
404 reloc_size += CompiledStaticCall::reloc_to_interp_stub();
405 }
406 } else if (mach->is_MachSafePoint()) {
407 // If call/safepoint are adjacent, account for possible
408 // nop to disambiguate the two safepoints.
409 // ScheduleAndBundle() can rearrange nodes in a block,
410 // check for all offsets inside this block.
411 if (last_call_adr >= blk_starts[i]) {
412 blk_size += nop_size;
413 }
414 }
415 if (mach->avoid_back_to_back()) {
416 // Nop is inserted between "avoid back to back" instructions.
1116 }
1117 }
1118 }
1119
1120 // Calculate the offsets of the constants and the size of the
1121 // constant table (including the padding to the next section).
1122 constant_table().calculate_offsets_and_size();
1123 const_req = constant_table().size() + add_size;
1124 }
1125
1126 // Initialize the space for the BufferBlob used to find and verify
1127 // instruction size in MachNode::emit_size()
1128 init_scratch_buffer_blob(const_req);
1129 if (failing()) return NULL; // Out of memory
1130
1131 // Pre-compute the length of blocks and replace
1132 // long branches with short if machine supports it.
1133 shorten_branches(blk_starts, code_req, locs_req, stub_req);
1134
1135 // nmethod and CodeBuffer count stubs & constants as part of method's code.
1136 int exception_handler_req = size_exception_handler();
1137 int deopt_handler_req = size_deopt_handler();
1138 exception_handler_req += MAX_stubs_size; // add marginal slop for handler
1139 deopt_handler_req += MAX_stubs_size; // add marginal slop for handler
1140 stub_req += MAX_stubs_size; // ensure per-stub margin
1141 code_req += MAX_inst_size; // ensure per-instruction margin
1142
1143 if (StressCodeBuffers)
1144 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
1145
1146 int total_req =
1147 const_req +
1148 code_req +
1149 pad_req +
1150 stub_req +
1151 exception_handler_req +
1152 deopt_handler_req; // deopt handler
1153
1154 if (has_method_handle_invokes())
1155 total_req += deopt_handler_req; // deopt MH handler
1156
1157 CodeBuffer* cb = code_buffer();
1158 cb->initialize(total_req, locs_req);
1159
1605 int br_size = jmp_size[i];
1606 int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
1607 if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
1608 tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
1609 assert(false, "Displacement too large for short jmp");
1610 }
1611 }
1612 }
1613 #endif
1614
1615 #ifndef PRODUCT
1616 // Information on the size of the method, without the extraneous code
1617 Scheduling::increment_method_size(cb->insts_size());
1618 #endif
1619
1620 // ------------------
1621 // Fill in exception table entries.
1622 FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);
1623
1624 // Only java methods have exception handlers and deopt handlers
1625 if (_method) {
1626 // Emit the exception handler code.
1627 _code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
1628 // Emit the deopt handler code.
1629 _code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
1630
1631 // Emit the MethodHandle deopt handler code (if required).
1632 if (has_method_handle_invokes()) {
1633 // We can use the same code as for the normal deopt handler, we
1634 // just need a different entry point address.
1635 _code_offsets.set_value(CodeOffsets::DeoptMH, emit_deopt_handler(*cb));
1636 }
1637 }
1638
1639 // One last check for failed CodeBuffer::expand:
1640 if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1641 C->record_failure("CodeCache is full");
1642 return;
1643 }
1644
1645 #ifndef PRODUCT
1646 // Dump the assembly code, including basic-block numbers
1647 if (print_assembly()) {
1648 ttyLocker ttyl; // keep the following output all in one block
1649 if (!VMThread::should_terminate()) { // test this under the tty lock
1650 // This output goes directly to the tty, not the compiler log.
1651 // To enable tools to match it up with the compilation activity,
1652 // be sure to tag this tty output with the compile ID.
1653 if (xtty != NULL) {
1654 xtty->head("opto_assembly compile_id='%d'%s", compile_id(),
1655 is_osr_compilation() ? " compile_kind='osr'" :
|
25 #include "precompiled.hpp"
26 #include "asm/assembler.inline.hpp"
27 #include "code/compiledIC.hpp"
28 #include "code/debugInfo.hpp"
29 #include "code/debugInfoRec.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "compiler/oopMap.hpp"
32 #include "memory/allocation.inline.hpp"
33 #include "opto/callnode.hpp"
34 #include "opto/cfgnode.hpp"
35 #include "opto/locknode.hpp"
36 #include "opto/machnode.hpp"
37 #include "opto/output.hpp"
38 #include "opto/regalloc.hpp"
39 #include "opto/runtime.hpp"
40 #include "opto/subnode.hpp"
41 #include "opto/type.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "utilities/xmlstream.hpp"
44
// DEBUG_ARG(x) expands to ", x" in non-product builds and to nothing in
// product builds, letting callers conditionally append debug-only trailing
// arguments to a call without #ifdef-ing every call site.
45 #ifndef PRODUCT
46 #define DEBUG_ARG(x) , x
47 #else
48 #define DEBUG_ARG(x)
49 #endif
50
51 // Convert Nodes to instruction bits and pass off to the VM
52 void Compile::Output() {
53 // RootNode goes
54 assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
55
56 // The number of new nodes (mostly MachNop) is proportional to
57 // the number of java calls and inner loops which are aligned.
58 if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
59 C->inner_loops()*(OptoLoopAlignment-1)),
60 "out of nodes before code generation" ) ) {
61 return;
62 }
63 // Make sure I can find the Start Node
64 Block *entry = _cfg->get_block(1);
65 Block *broot = _cfg->get_root_block();
66
67 const StartNode *start = entry->head()->as_Start();
68
69 // Replace StartNode with prolog
70 MachPrologNode *prolog = new (this) MachPrologNode();
371 // offset of jump in jmp_offset, rather than the absolute offset of jump.
372 // This is so that we do not need to recompute sizes of all nodes when
373 // we compute correct blk_starts in our next sizing pass.
374 jmp_offset[i] = 0;
375 jmp_size[i] = 0;
376 jmp_nidx[i] = -1;
377 DEBUG_ONLY( jmp_target[i] = 0; )
378 DEBUG_ONLY( jmp_rule[i] = 0; )
379
380 // Sum all instruction sizes to compute block size
381 uint last_inst = block->number_of_nodes();
382 uint blk_size = 0;
383 for (uint j = 0; j < last_inst; j++) {
384 Node* nj = block->get_node(j);
385 // Handle machine instruction nodes
386 if (nj->is_Mach()) {
387 MachNode *mach = nj->as_Mach();
388 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
389 reloc_size += mach->reloc();
390 if (mach->is_MachCall()) {
391 // add size information for trampoline stub
392 // class CallStubImpl is platform-specific and defined in the *.ad files.
393 stub_size += CallStubImpl::size_call_trampoline();
394 reloc_size += CallStubImpl::reloc_call_trampoline();
395
396 MachCallNode *mcall = mach->as_MachCall();
397 // This destination address is NOT PC-relative
398
399 mcall->method_set((intptr_t)mcall->entry_point());
400
401 if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
402 stub_size += CompiledStaticCall::to_interp_stub_size();
403 reloc_size += CompiledStaticCall::reloc_to_interp_stub();
404 }
405 } else if (mach->is_MachSafePoint()) {
406 // If call/safepoint are adjacent, account for possible
407 // nop to disambiguate the two safepoints.
408 // ScheduleAndBundle() can rearrange nodes in a block,
409 // check for all offsets inside this block.
410 if (last_call_adr >= blk_starts[i]) {
411 blk_size += nop_size;
412 }
413 }
414 if (mach->avoid_back_to_back()) {
415 // Nop is inserted between "avoid back to back" instructions.
1115 }
1116 }
1117 }
1118
1119 // Calculate the offsets of the constants and the size of the
1120 // constant table (including the padding to the next section).
1121 constant_table().calculate_offsets_and_size();
1122 const_req = constant_table().size() + add_size;
1123 }
1124
1125 // Initialize the space for the BufferBlob used to find and verify
1126 // instruction size in MachNode::emit_size()
1127 init_scratch_buffer_blob(const_req);
1128 if (failing()) return NULL; // Out of memory
1129
1130 // Pre-compute the length of blocks and replace
1131 // long branches with short if machine supports it.
1132 shorten_branches(blk_starts, code_req, locs_req, stub_req);
1133
1134 // nmethod and CodeBuffer count stubs & constants as part of method's code.
1135 // class HandlerImpl is platform-specific and defined in the *.ad files.
1136 int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
1137 int deopt_handler_req = HandlerImpl::size_deopt_handler() + MAX_stubs_size; // add marginal slop for handler
1138 stub_req += MAX_stubs_size; // ensure per-stub margin
1139 code_req += MAX_inst_size; // ensure per-instruction margin
1140
1141 if (StressCodeBuffers)
1142 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
1143
1144 int total_req =
1145 const_req +
1146 code_req +
1147 pad_req +
1148 stub_req +
1149 exception_handler_req +
1150 deopt_handler_req; // deopt handler
1151
1152 if (has_method_handle_invokes())
1153 total_req += deopt_handler_req; // deopt MH handler
1154
1155 CodeBuffer* cb = code_buffer();
1156 cb->initialize(total_req, locs_req);
1157
1603 int br_size = jmp_size[i];
1604 int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
1605 if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
1606 tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
1607 assert(false, "Displacement too large for short jmp");
1608 }
1609 }
1610 }
1611 #endif
1612
1613 #ifndef PRODUCT
1614 // Information on the size of the method, without the extraneous code
1615 Scheduling::increment_method_size(cb->insts_size());
1616 #endif
1617
1618 // ------------------
1619 // Fill in exception table entries.
1620 FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);
1621
1622 // Only java methods have exception handlers and deopt handlers
1623 // class HandlerImpl is platform-specific and defined in the *.ad files.
1624 if (_method) {
1625 // Emit the exception handler code.
1626 _code_offsets.set_value(CodeOffsets::Exceptions, HandlerImpl::emit_exception_handler(*cb));
1627 // Emit the deopt handler code.
1628 _code_offsets.set_value(CodeOffsets::Deopt, HandlerImpl::emit_deopt_handler(*cb));
1629
1630 // Emit the MethodHandle deopt handler code (if required).
1631 if (has_method_handle_invokes()) {
1632 // We can use the same code as for the normal deopt handler, we
1633 // just need a different entry point address.
1634 _code_offsets.set_value(CodeOffsets::DeoptMH, HandlerImpl::emit_deopt_handler(*cb));
1635 }
1636 }
1637
1638 // One last check for failed CodeBuffer::expand:
1639 if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1640 C->record_failure("CodeCache is full");
1641 return;
1642 }
1643
1644 #ifndef PRODUCT
1645 // Dump the assembly code, including basic-block numbers
1646 if (print_assembly()) {
1647 ttyLocker ttyl; // keep the following output all in one block
1648 if (!VMThread::should_terminate()) { // test this under the tty lock
1649 // This output goes directly to the tty, not the compiler log.
1650 // To enable tools to match it up with the compilation activity,
1651 // be sure to tag this tty output with the compile ID.
1652 if (xtty != NULL) {
1653 xtty->head("opto_assembly compile_id='%d'%s", compile_id(),
1654 is_osr_compilation() ? " compile_kind='osr'" :
|