40 #include "opto/subnode.hpp"
41 #include "opto/type.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "utilities/xmlstream.hpp"
44
45 extern uint size_exception_handler();
46 extern uint size_deopt_handler();
47
48 #ifndef PRODUCT
49 #define DEBUG_ARG(x) , x
50 #else
51 #define DEBUG_ARG(x)
52 #endif
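// Usage sketch (hypothetical declaration, not from this file): DEBUG_ARG
// makes a trailing parameter exist only in non-product builds, e.g.
//   void emit_block(CodeBuffer &cbuf DEBUG_ARG(const char* note));
// declares (CodeBuffer&, const char*) in debug builds and just
// (CodeBuffer&) in product builds.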
53
54 extern int emit_exception_handler(CodeBuffer &cbuf);
55 extern int emit_deopt_handler(CodeBuffer &cbuf);
56
57 // Convert Nodes to instruction bits and pass off to the VM
58 void Compile::Output() {
59 // The RootNode's block must be empty; the root itself emits no code.
60 assert( _cfg->get_root_block()->_nodes.size() == 0, "" );
61
62 // The number of new nodes (mostly MachNop) is proportional to
63 // the number of java calls and inner loops which are aligned.
64 if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
65 C->inner_loops()*(OptoLoopAlignment-1)),
66 "out of nodes before code generation" ) ) {
67 return;
68 }
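// Worked example of the bound above: 10 java calls and 2 inner loops with
// OptoLoopAlignment == 16 reserve NodeLimitFudgeFactor + 10*3 + 2*(16-1)
// = NodeLimitFudgeFactor + 60 extra nodes.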
69 // Make sure I can find the Start Node
70 Block *entry = _cfg->get_block(1);
71 Block *broot = _cfg->get_root_block();
72
73 const StartNode *start = entry->_nodes[0]->as_Start();
74
75 // Replace StartNode with prolog
76 MachPrologNode *prolog = new (this) MachPrologNode();
77 entry->_nodes.map( 0, prolog );
78 _cfg->map_node_to_block(prolog, entry);
79 _cfg->unmap_node_from_block(start); // start is no longer in any block
80
81 // Virtual methods need an unverified entry point
82
83 if( is_osr_compilation() ) {
84 if( PoisonOSREntry ) {
85 // TODO: Should use a ShouldNotReachHereNode...
86 _cfg->insert( broot, 0, new (this) MachBreakpointNode() );
87 }
88 } else {
89 if( _method && !_method->flags().is_static() ) {
90 // Insert unvalidated entry point
91 _cfg->insert( broot, 0, new (this) MachUEPNode() );
92 }
93
94 }
95
96
97 // Break before main entry point
127 # endif
128
129 uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
130 blk_starts[0] = 0;
131
132 // Initialize code buffer and process short branches.
133 CodeBuffer* cb = init_buffer(blk_starts);
134
135 if (cb == NULL || failing()) {
136 return;
137 }
138
139 ScheduleAndBundle();
140
141 #ifndef PRODUCT
142 if (trace_opto_output()) {
143 tty->print("\n---- After ScheduleAndBundle ----\n");
144 for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
145 tty->print("\nBB#%03d:\n", i);
146 Block* block = _cfg->get_block(i);
147 for (uint j = 0; j < block->_nodes.size(); j++) {
148 Node* n = block->_nodes[j];
149 OptoReg::Name reg = _regalloc->get_reg_first(n);
150 tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
151 n->dump();
152 }
153 }
154 }
155 #endif
156
157 if (failing()) {
158 return;
159 }
160
161 BuildOopMaps();
162
163 if (failing()) {
164 return;
165 }
166
167 fill_buffer(cb, blk_starts);
168 }
209 else if ( CompileZapFirst > CompiledZap_count() ) skip = true;
210 else if ( CompileZapFirst == CompiledZap_count() )
211 warning("starting zap compilation after skipping");
212
213 if ( CompileZapLast == -1 ) ; // nothing special
214 else if ( CompileZapLast < CompiledZap_count() ) skip = true;
215 else if ( CompileZapLast == CompiledZap_count() )
216 warning("about to compile last zap");
217
218 ++_CompiledZap_count; // counts skipped zaps, too
219
220 if ( skip ) return;
221
222
223 if ( _method == NULL )
224 return; // no safepoints/oopmaps emitted for calls in stubs, so we don't care
225
226 // Insert call to zap runtime stub before every node with an oop map
227 for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
228 Block *b = _cfg->get_block(i);
229 for ( uint j = 0; j < b->_nodes.size(); ++j ) {
230 Node *n = b->_nodes[j];
231
232 // Determine whether we should insert a zap-a-lot node in the output.
233 // We do that for all nodes that have oopmap info, except for calls
234 // to allocation. Calls to allocation pass in the old top-of-eden pointer
235 // and expect the C code to reset it. Hence, there can be no safepoints between
236 // the inlined allocation and the call to new_Java, etc.
237 // We also cannot zap monitor calls, as they must hold the microlock
238 // during the call to Zap, which also wants to grab the microlock.
239 bool insert = n->is_MachSafePoint() && (n->as_MachSafePoint()->oop_map() != NULL);
240 if ( insert ) { // it is MachSafePoint
241 if ( !n->is_MachCall() ) {
242 insert = false;
243 } else { // n is a MachCall
244 MachCallNode* call = n->as_MachCall();
245 if (call->entry_point() == OptoRuntime::new_instance_Java() ||
246 call->entry_point() == OptoRuntime::new_array_Java() ||
247 call->entry_point() == OptoRuntime::multianewarray2_Java() ||
248 call->entry_point() == OptoRuntime::multianewarray3_Java() ||
249 call->entry_point() == OptoRuntime::multianewarray4_Java() ||
250 call->entry_point() == OptoRuntime::multianewarray5_Java() ||
251 call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
252 call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
253 ) {
254 insert = false;
255 }
256 }
257 if (insert) {
258 Node *zap = call_zap_node(n->as_MachSafePoint(), i);
259 b->_nodes.insert( j, zap );
260 _cfg->map_node_to_block(zap, b);
261 ++j;
262 }
263 }
264 }
265 }
266 }
267
268
269 Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
270 const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
271 CallStaticJavaNode* ideal_node =
272 new (this) CallStaticJavaNode( tf,
273 OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
274 "call zap dead locals stub", 0, TypePtr::BOTTOM);
275 // We need to copy the OopMap from the site we're zapping at.
276 // We have to make a copy, because the zap site might not be
277 // a call site, and zap_dead is a call site.
278 OopMap* clone = node_to_check->oop_map()->deep_copy();
279
362 // third inserts nops where needed.
363
364 // Step one, perform a pessimistic sizing pass.
365 uint last_call_adr = max_uint;
366 uint last_avoid_back_to_back_adr = max_uint;
367 uint nop_size = (new (this) MachNopNode())->size(_regalloc);
368 for (uint i = 0; i < nblocks; i++) { // For all blocks
369 Block* block = _cfg->get_block(i);
370
371 // During short branch replacement, we store the relative (to blk_starts)
372 // offset of jump in jmp_offset, rather than the absolute offset of jump.
373 // This is so that we do not need to recompute sizes of all nodes when
374 // we compute correct blk_starts in our next sizing pass.
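// For example, a jump placed 12 bytes into block 5 keeps jmp_offset[5] == 12
// however much earlier blocks shrink; its absolute offset is recomputed
// later as blk_starts[5] + jmp_offset[5] (see br_offs below).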
375 jmp_offset[i] = 0;
376 jmp_size[i] = 0;
377 jmp_nidx[i] = -1;
378 DEBUG_ONLY( jmp_target[i] = 0; )
379 DEBUG_ONLY( jmp_rule[i] = 0; )
380
381 // Sum all instruction sizes to compute block size
382 uint last_inst = block->_nodes.size();
383 uint blk_size = 0;
384 for (uint j = 0; j < last_inst; j++) {
385 Node* nj = block->_nodes[j];
386 // Handle machine instruction nodes
387 if (nj->is_Mach()) {
388 MachNode *mach = nj->as_Mach();
389 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
390 reloc_size += mach->reloc();
391 if (mach->is_MachCall()) {
392 MachCallNode *mcall = mach->as_MachCall();
393 // This destination address is NOT PC-relative
394
395 mcall->method_set((intptr_t)mcall->entry_point());
396
397 if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
398 stub_size += CompiledStaticCall::to_interp_stub_size();
399 reloc_size += CompiledStaticCall::reloc_to_interp_stub();
400 }
401 } else if (mach->is_MachSafePoint()) {
402 // If call/safepoint are adjacent, account for possible
403 // nop to disambiguate the two safepoints.
404 // ScheduleAndBundle() can rearrange nodes in a block,
405 // check for all offsets inside this block.
460 last_avoid_back_to_back_adr += max_loop_pad;
461 }
462 blk_size += max_loop_pad;
463 }
464 }
465
466 // Save block size; update total method size
467 blk_starts[i+1] = blk_starts[i]+blk_size;
468 }
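// At this point blk_starts[i] is the pessimistic, method-relative start of
// block i and blk_starts[nblocks] bounds the total code size; e.g. block
// sizes of 16, 8 and 24 yield blk_starts = {0, 16, 24, 48}.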
469
470 // Step two, replace eligible long jumps.
471 bool progress = true;
472 uint last_may_be_short_branch_adr = max_uint;
473 while (has_short_branch_candidate && progress) {
474 progress = false;
475 has_short_branch_candidate = false;
476 int adjust_block_start = 0;
477 for (uint i = 0; i < nblocks; i++) {
478 Block* block = _cfg->get_block(i);
479 int idx = jmp_nidx[i];
480 MachNode* mach = (idx == -1) ? NULL: block->_nodes[idx]->as_Mach();
481 if (mach != NULL && mach->may_be_short_branch()) {
482 #ifdef ASSERT
483 assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
484 int j;
485 // Find the branch; ignore trailing NOPs.
486 for (j = block->_nodes.size()-1; j>=0; j--) {
487 Node* n = block->_nodes[j];
488 if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
489 break;
490 }
491 assert(j >= 0 && j == idx && block->_nodes[j] == (Node*)mach, "sanity");
492 #endif
493 int br_size = jmp_size[i];
494 int br_offs = blk_starts[i] + jmp_offset[i];
495
496 // This requires the TRUE branch target be in succs[0]
497 uint bnum = block->non_connector_successor(0)->_pre_order;
498 int offset = blk_starts[bnum] - br_offs;
499 if (bnum > i) { // adjust following block's offset
500 offset -= adjust_block_start;
501 }
502 // In the following code a nop could be inserted before
503 // the branch, which would increase the backward distance.
504 bool needs_padding = ((uint)br_offs == last_may_be_short_branch_adr);
505 if (needs_padding && offset <= 0)
506 offset -= nop_size;
507
508 if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
509 // We've got a winner. Replace this branch.
510 MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
511
512 // Update the jmp_size.
513 int new_size = replacement->size(_regalloc);
514 int diff = br_size - new_size;
515 assert(diff >= (int)nop_size, "short_branch size should be smaller");
516 // Conservatively take into account padding between
517 // avoid_back_to_back branches. The previous branch could be
518 // converted into an avoid_back_to_back branch during later
519 // rounds.
520 if (needs_padding && replacement->avoid_back_to_back()) {
521 jmp_offset[i] += nop_size;
522 diff -= nop_size;
523 }
524 adjust_block_start += diff;
525 block->_nodes.map(idx, replacement);
526 mach->subsume_by(replacement, C);
527 mach = replacement;
528 progress = true;
529
530 jmp_size[i] = new_size;
531 DEBUG_ONLY( jmp_target[i] = bnum; );
532 DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
533 } else {
534 // The jump distance is not short; try again during the next iteration.
535 has_short_branch_candidate = true;
536 }
537 } // (mach->may_be_short_branch())
538 if (mach != NULL && (mach->may_be_short_branch() ||
539 mach->avoid_back_to_back())) {
540 last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
541 }
542 blk_starts[i+1] -= adjust_block_start;
543 }
544 }
545
1071 // The usual spill mechanism can only generate stfd's in this case, which
1072 // doesn't work if the fp reg to spill contains a single-precision denorm.
1073 // Instead, we hack around the normal spill mechanism using stfspill's and
1074 // ldffill's in the MachProlog and MachEpilog emit methods. We allocate
1075 // space here for the fp arg regs (f8-f15) we're going to thusly spill.
1076 //
1077 // If we ever implement 16-byte 'registers' == stack slots, we can
1078 // get rid of this hack and have SpillCopy generate stfspill/ldffill
1079 // instead of stfd/stfs/ldfd/ldfs.
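// Worked size: 8 regs * 16 bytes each / BytesPerInt (4 bytes per slot)
// = 32 extra stack slots.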
1080 _frame_slots += 8*(16/BytesPerInt);
1081 }
1082 #endif
1083 assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");
1084
1085 if (has_mach_constant_base_node()) {
1086 // Fill the constant table.
1087 // Note: This must happen before shorten_branches.
1088 for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
1089 Block* b = _cfg->get_block(i);
1090
1091 for (uint j = 0; j < b->_nodes.size(); j++) {
1092 Node* n = b->_nodes[j];
1093
1094 // If the node is a MachConstantNode evaluate the constant
1095 // value section.
1096 if (n->is_MachConstant()) {
1097 MachConstantNode* machcon = n->as_MachConstant();
1098 machcon->eval_constant(C);
1099 }
1100 }
1101 }
1102
1103 // Calculate the offsets of the constants and the size of the
1104 // constant table (including the padding to the next section).
1105 constant_table().calculate_offsets_and_size();
1106 const_req = constant_table().size();
1107 }
1108
1109 // Initialize the space for the BufferBlob used to find and verify
1110 // instruction size in MachNode::emit_size()
1111 init_scratch_buffer_blob(const_req);
1112 if (failing()) return NULL; // Out of memory
1230 if (Pipeline::requires_bundling() && starts_bundle(head)) {
1231 cb->flush_bundle(true);
1232 }
1233
1234 #ifdef ASSERT
1235 if (!block->is_connector()) {
1236 stringStream st;
1237 block->dump_head(_cfg, &st);
1238 MacroAssembler(cb).block_comment(st.as_string());
1239 }
1240 jmp_target[i] = 0;
1241 jmp_offset[i] = 0;
1242 jmp_size[i] = 0;
1243 jmp_rule[i] = 0;
1244 #endif
1245 int blk_offset = current_offset;
1246
1247 // Define the label at the beginning of the basic block
1248 MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
1249
1250 uint last_inst = block->_nodes.size();
1251
1252 // Emit block normally, except for last instruction.
1253 // Emit means "dump code bits into code buffer".
1254 for (uint j = 0; j<last_inst; j++) {
1255
1256 // Get the node
1257 Node* n = block->_nodes[j];
1258
1259 // See if delay slots are supported
1260 if (valid_bundle_info(n) &&
1261 node_bundling(n)->used_in_unconditional_delay()) {
1262 assert(delay_slot == NULL, "no use of delay slot node");
1263 assert(n->size(_regalloc) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
1264
1265 delay_slot = n;
1266 continue;
1267 }
1268
1269 // If this starts a new instruction group, then flush the current one
1270 // (but allow split bundles)
1271 if (Pipeline::requires_bundling() && starts_bundle(n))
1272 cb->flush_bundle(false);
1273
1274 // The following logic is duplicated in the code ifdeffed for
1275 // ENABLE_ZAP_DEAD_LOCALS which appears above in this file. It
1276 // should be factored out. Or maybe dispersed to the nodes?
1277
1291 // Padding may be needed again since a previous instruction
1292 // could be moved to a delay slot.
1293
1294 // align the instruction if necessary
1295 int padding = mach->compute_padding(current_offset);
1296 // Make sure safepoint node for polling is distinct from a call's
1297 // return by adding a nop if needed.
1298 if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1299 padding = nop_size;
1300 }
1301 if (padding == 0 && mach->avoid_back_to_back() &&
1302 current_offset == last_avoid_back_to_back_offset) {
1303 // Avoid placing some instructions back to back.
1304 padding = nop_size;
1305 }
1306
1307 if(padding > 0) {
1308 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1309 int nops_cnt = padding / nop_size;
1310 MachNode *nop = new (this) MachNopNode(nops_cnt);
1311 block->_nodes.insert(j++, nop);
1312 last_inst++;
1313 _cfg->map_node_to_block(nop, block);
1314 nop->emit(*cb, _regalloc);
1315 cb->flush_bundle(true);
1316 current_offset = cb->insts_size();
1317 }
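// E.g. padding == 8 with nop_size == 4 yields MachNopNode(2): two nops
// that advance current_offset by 8 before the aligned instruction.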
1318
1319 // Remember the start of the last call in a basic block
1320 if (is_mcall) {
1321 MachCallNode *mcall = mach->as_MachCall();
1322
1323 // This destination address is NOT PC-relative
1324 mcall->method_set((intptr_t)mcall->entry_point());
1325
1326 // Save the return address
1327 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1328
1329 if (mcall->is_MachCallLeaf()) {
1330 is_mcall = false;
1331 is_sfn = false;
1377 // finalized yet, adjust distance by the difference
1378 // between calculated and final offsets of the current block.
1379 offset -= (blk_starts[i] - blk_offset);
1380 }
1381 // In the following code a nop could be inserted before
1382 // the branch, which would increase the backward distance.
1383 bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
1384 if (needs_padding && offset <= 0)
1385 offset -= nop_size;
1386
1387 if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
1388 // We've got a winner. Replace this branch.
1389 MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
1390
1391 // Update the jmp_size.
1392 int new_size = replacement->size(_regalloc);
1393 assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
1394 // Insert padding between avoid_back_to_back branches.
1395 if (needs_padding && replacement->avoid_back_to_back()) {
1396 MachNode *nop = new (this) MachNopNode();
1397 block->_nodes.insert(j++, nop);
1398 _cfg->map_node_to_block(nop, block);
1399 last_inst++;
1400 nop->emit(*cb, _regalloc);
1401 cb->flush_bundle(true);
1402 current_offset = cb->insts_size();
1403 }
1404 #ifdef ASSERT
1405 jmp_target[i] = block_num;
1406 jmp_offset[i] = current_offset - blk_offset;
1407 jmp_size[i] = new_size;
1408 jmp_rule[i] = mach->rule();
1409 #endif
1410 block->_nodes.map(j, replacement);
1411 mach->subsume_by(replacement, C);
1412 n = replacement;
1413 mach = replacement;
1414 }
1415 }
1416 mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
1417 } else if (mach->ideal_Opcode() == Op_Jump) {
1418 for (uint h = 0; h < block->_num_succs; h++) {
1419 Block* succs_block = block->_succs[h];
1420 for (uint j = 1; j < succs_block->num_preds(); j++) {
1421 Node* jpn = succs_block->pred(j);
1422 if (jpn->is_JumpProj() && jpn->in(0) == mach) {
1423 uint block_num = succs_block->non_connector()->_pre_order;
1424 Label *blkLabel = &blk_labels[block_num];
1425 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1426 }
1427 }
1428 }
1429 }
1430 #ifdef ASSERT
1431 // Check that oop-store precedes the card-mark
1432 else if (mach->ideal_Opcode() == Op_StoreCM) {
1433 uint storeCM_idx = j;
1434 int count = 0;
1435 for (uint prec = mach->req(); prec < mach->len(); prec++) {
1436 Node *oop_store = mach->in(prec); // Precedence edge
1437 if (oop_store == NULL) continue;
1438 count++;
1439 uint i4;
1440 for (i4 = 0; i4 < last_inst; ++i4) {
1441 if (block->_nodes[i4] == oop_store) {
1442 break;
1443 }
1444 }
1445 // Note: This test can provide a false failure if other precedence
1446 // edges have been added to the storeCMNode.
1447 assert(i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
1448 }
1449 assert(count > 0, "storeCM expects at least one precedence edge");
1450 }
1451 #endif
1452 else if (!n->is_Proj()) {
1453 // Remember the beginning of the previous instruction, in case
1454 // it's followed by a flag-kill and a null-check. Happens on
1455 // Intel all the time, with add-to-memory kind of opcodes.
1456 previous_offset = current_offset;
1457 }
1458 }
1459
1460 // Verify that there is sufficient space remaining
1461 cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1531 // Generate an OopMap entry
1532 Process_OopMap_Node(mach, adjusted_offset);
1533 }
1534
1535 // Insert the delay slot instruction
1536 delay_slot->emit(*cb, _regalloc);
1537
1538 // Don't reuse it
1539 delay_slot = NULL;
1540 }
1541
1542 } // End for all instructions in block
1543
1544 // If the next block is the top of a loop, pad this block out to align
1545 // the loop top a little. Helps prevent pipe stalls at loop back branches.
1546 if (i < nblocks-1) {
1547 Block *nb = _cfg->get_block(i + 1);
1548 int padding = nb->alignment_padding(current_offset);
1549 if( padding > 0 ) {
1550 MachNode *nop = new (this) MachNopNode(padding / nop_size);
1551 block->_nodes.insert(block->_nodes.size(), nop);
1552 _cfg->map_node_to_block(nop, block);
1553 nop->emit(*cb, _regalloc);
1554 current_offset = cb->insts_size();
1555 }
1556 }
1557 // Verify that the distances generated earlier for forward
1558 // short branches are still valid.
1559 guarantee((int)(blk_starts[i+1] - blk_starts[i]) >= (current_offset - blk_offset), "shouldn't increase block size");
1560
1561 // Save new block start offset
1562 blk_starts[i] = blk_offset;
1563 } // End of for all blocks
1564 blk_starts[nblocks] = current_offset;
1565
1566 non_safepoints.flush_at_end();
1567
1568 // Offset too large?
1569 if (failing()) return;
1570
1571 // Define a pseudo-label at the end of the code
1638 dump_asm(node_offsets, node_offset_limit);
1639 if (xtty != NULL) {
1640 xtty->tail("opto_assembly");
1641 }
1642 }
1643 }
1644 #endif
1645
1646 }
1647
1648 void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
1649 _inc_table.set_size(cnt);
1650
1651 uint inct_cnt = 0;
1652 for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
1653 Block* block = _cfg->get_block(i);
1654 Node *n = NULL;
1655 int j;
1656
1657 // Find the branch; ignore trailing NOPs.
1658 for (j = block->_nodes.size() - 1; j >= 0; j--) {
1659 n = block->_nodes[j];
1660 if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
1661 break;
1662 }
1663 }
1664
1665 // If we didn't find anything, continue
1666 if (j < 0) {
1667 continue;
1668 }
1669
1670 // Compute ExceptionHandlerTable subtable entry and add it
1671 // (skip empty blocks)
1672 if (n->is_Catch()) {
1673
1674 // Get the offset of the return from the call
1675 uint call_return = call_returns[block->_pre_order];
1676 #ifdef ASSERT
1677 assert( call_return > 0, "no call seen for this basic block" );
1678 while (block->_nodes[--j]->is_MachProj()) ;
1679 assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
1680 #endif
1681 // last instruction is a CatchNode; find its CatchProjNodes
1682 int nof_succs = block->_num_succs;
1683 // allocate space
1684 GrowableArray<intptr_t> handler_bcis(nof_succs);
1685 GrowableArray<intptr_t> handler_pcos(nof_succs);
1686 // iterate through all successors
1687 for (int j = 0; j < nof_succs; j++) {
1688 Block* s = block->_succs[j];
1689 bool found_p = false;
1690 for (uint k = 1; k < s->num_preds(); k++) {
1691 Node* pk = s->pred(k);
1692 if (pk->is_CatchProj() && pk->in(0) == n) {
1693 const CatchProjNode* p = pk->as_CatchProj();
1694 found_p = true;
1695 // add the corresponding handler bci & pco information
1696 if (p->_con != CatchProjNode::fall_through_index) {
1697 // p leads to an exception handler (and is not fall through)
1698 assert(s == _cfg->get_block(s->_pre_order), "bad numbering");
1699 // no duplicates, please
1765 // This one is persistent within the Compile class
1766 _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);
1767
1768 // Allocate space for fixed-size arrays
1769 _node_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
1770 _uses = NEW_ARENA_ARRAY(arena, short, node_max);
1771 _current_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
1772
1773 // Clear the arrays
1774 memset(_node_bundling_base, 0, node_max * sizeof(Bundle));
1775 memset(_node_latency, 0, node_max * sizeof(unsigned short));
1776 memset(_uses, 0, node_max * sizeof(short));
1777 memset(_current_latency, 0, node_max * sizeof(unsigned short));
1778
1779 // Clear the bundling information
1780 memcpy(_bundle_use_elements, Pipeline_Use::elaborated_elements, sizeof(Pipeline_Use::elaborated_elements));
1781
1782 // Get the last node
1783 Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
1784
1785 _next_node = block->_nodes[block->_nodes.size() - 1];
1786 }
1787
1788 #ifndef PRODUCT
1789 // Scheduling destructor
1790 Scheduling::~Scheduling() {
1791 _total_branches += _branches;
1792 _total_unconditional_delays += _unconditional_delays;
1793 }
1794 #endif
1795
1796 // Step ahead "i" cycles
1797 void Scheduling::step(uint i) {
1798
1799 Bundle *bundle = node_bundling(_next_node);
1800 bundle->set_starts_bundle();
1801
1802 // Update the bundle record, but leave the flags information alone
1803 if (_bundle_instr_count > 0) {
1804 bundle->set_instr_count(_bundle_instr_count);
1805 bundle->set_resources_used(_bundle_use.resourcesUsed());
1858 scheduling.DoScheduling();
1859 }
1860
1861 // Compute the latency of all the instructions. This is fairly simple,
1862 // because we already have a legal ordering. Walk over the instructions
1863 // from first to last, and compute the latency of the instruction based
1864 // on the latency of the preceding instruction(s).
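// For example, an input whose def has latency 3, reached over an edge of
// latency 2, gives the use latency max(1, 3 + 2) = 5; the loop below keeps
// the maximum over all inputs.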
1865 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
1866 #ifndef PRODUCT
1867 if (_cfg->C->trace_opto_output())
1868 tty->print("# -> ComputeLocalLatenciesForward\n");
1869 #endif
1870
1871 // Walk over all the schedulable instructions
1872 for( uint j=_bb_start; j < _bb_end; j++ ) {
1873
1874 // This is a kludge, forcing all latency calculations to start at 1.
1875 // Used to allow latency 0 to force an instruction to the beginning
1876 // of the bb
1877 uint latency = 1;
1878 Node *use = bb->_nodes[j];
1879 uint nlen = use->len();
1880
1881 // Walk over all the inputs
1882 for ( uint k=0; k < nlen; k++ ) {
1883 Node *def = use->in(k);
1884 if (!def)
1885 continue;
1886
1887 uint l = _node_latency[def->_idx] + use->latency(k);
1888 if (latency < l)
1889 latency = l;
1890 }
1891
1892 _node_latency[use->_idx] = latency;
1893
1894 #ifndef PRODUCT
1895 if (_cfg->C->trace_opto_output()) {
1896 tty->print("# latency %4d: ", latency);
1897 use->dump();
1898 }
2269
2270 // Increment the number of instructions in this bundle
2271 _bundle_instr_count += instruction_count;
2272
2273 // Remember this node for later
2274 if (n->is_Mach())
2275 _next_node = n;
2276 }
2277
2278 // It's possible to have a BoxLock in the graph and in the _bbs mapping but
2279 // not in the bb->_nodes array. This happens for debug-info-only BoxLocks.
2280 // 'Schedule' them (basically ignore in the schedule) but do not insert them
2281 // into the block. All other scheduled nodes get put in the schedule here.
2282 int op = n->Opcode();
2283 if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR
2284 (op != Op_Node && // Not an unused antidependence node and
2285 // not an unallocated boxlock
2286 (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
2287
2288 // Push any trailing projections
2289 if( bb->_nodes[bb->_nodes.size()-1] != n ) {
2290 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2291 Node *foi = n->fast_out(i);
2292 if( foi->is_Proj() )
2293 _scheduled.push(foi);
2294 }
2295 }
2296
2297 // Put the instruction in the schedule list
2298 _scheduled.push(n);
2299 }
2300
2301 #ifndef PRODUCT
2302 if (_cfg->C->trace_opto_output())
2303 dump_available();
2304 #endif
2305
2306 // Walk all the definitions, decrementing use counts, and
2307 // if a definition has a 0 use count, place it in the available list.
2308 DecrementUseCounts(n,bb);
2309 }
2312 // uses outside the current basic block. As we are doing a backwards walk,
2313 // any node we reach that has a use count of 0 may be scheduled. This also
2314 // avoids the problem of cyclic references from phi nodes, as long as phi
2315 // nodes are at the front of the basic block. This method also initializes
2316 // the available list to the set of instructions that have no uses within this
2317 // basic block.
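// Illustrative: for a block  a = LoadI; b = AddI(a, c); StoreI(b)  both a
// and b get one block-local use, so only the StoreI starts on the available
// list; a and b become available as their uses are scheduled.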
2318 void Scheduling::ComputeUseCount(const Block *bb) {
2319 #ifndef PRODUCT
2320 if (_cfg->C->trace_opto_output())
2321 tty->print("# -> ComputeUseCount\n");
2322 #endif
2323
2324 // Clear the list of available and scheduled instructions, just in case
2325 _available.clear();
2326 _scheduled.clear();
2327
2328 // No delay slot specified
2329 _unconditional_delay_slot = NULL;
2330
2331 #ifdef ASSERT
2332 for( uint i=0; i < bb->_nodes.size(); i++ )
2333 assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
2334 #endif
2335
2336 // Force the _uses count to never go to zero for unschedulable pieces
2337 // of the block
2338 for( uint k = 0; k < _bb_start; k++ )
2339 _uses[bb->_nodes[k]->_idx] = 1;
2340 for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
2341 _uses[bb->_nodes[l]->_idx] = 1;
2342
2343 // Iterate backwards over the instructions in the block. Don't count the
2344 // branch projections at end or the block header instructions.
2345 for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
2346 Node *n = bb->_nodes[j];
2347 if( n->is_Proj() ) continue; // Projections handled another way
2348
2349 // Account for all uses
2350 for ( uint k = 0; k < n->len(); k++ ) {
2351 Node *inp = n->in(k);
2352 if (!inp) continue;
2353 assert(inp != n, "no cycles allowed" );
2354 if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
2355 if (inp->is_Proj()) { // Skip through Proj's
2356 inp = inp->in(0);
2357 }
2358 ++_uses[inp->_idx]; // Count 1 block-local use
2359 }
2360 }
2361
2362 // If this instruction has a 0 use count, then it is available
2363 if (!_uses[n->_idx]) {
2364 _current_latency[n->_idx] = _bundle_cycle_number;
2365 AddNodeToAvailableList(n);
2366 }
2381
2382 // This routine performs scheduling on each basic block in reverse order,
2383 // using instruction latencies and taking into account function unit
2384 // availability.
2385 void Scheduling::DoScheduling() {
2386 #ifndef PRODUCT
2387 if (_cfg->C->trace_opto_output())
2388 tty->print("# -> DoScheduling\n");
2389 #endif
2390
2391 Block *succ_bb = NULL;
2392 Block *bb;
2393
2394 // Walk over all the basic blocks in reverse order
2395 for (int i = _cfg->number_of_blocks() - 1; i >= 0; succ_bb = bb, i--) {
2396 bb = _cfg->get_block(i);
2397
2398 #ifndef PRODUCT
2399 if (_cfg->C->trace_opto_output()) {
2400 tty->print("# Schedule BB#%03d (initial)\n", i);
2401 for (uint j = 0; j < bb->_nodes.size(); j++) {
2402 bb->_nodes[j]->dump();
2403 }
2404 }
2405 #endif
2406
2407 // On the head node, skip processing
2408 if (bb == _cfg->get_root_block()) {
2409 continue;
2410 }
2411
2412 // Skip empty, connector blocks
2413 if (bb->is_connector())
2414 continue;
2415
2416 // If the following block is not the sole successor of
2417 // this one, then reset the pipeline information
2418 if (bb->_num_succs != 1 || bb->non_connector_successor(0) != succ_bb) {
2419 #ifndef PRODUCT
2420 if (_cfg->C->trace_opto_output()) {
2421 tty->print("*** bundle start of next BB, node %d, for %d instructions\n",
2422 _next_node->_idx, _bundle_instr_count);
2423 }
2424 #endif
2425 step_and_clear();
2426 }
2427
2428 // Leave untouched the starting instruction, any Phis, a CreateEx node
2429 // or Top. bb->_nodes[_bb_start] is the first schedulable instruction.
2430 _bb_end = bb->_nodes.size()-1;
2431 for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
2432 Node *n = bb->_nodes[_bb_start];
2433 // Things not matched, like PhiNodes and ProjNodes, don't get scheduled.
2434 // Also, MachIdealNodes do not get scheduled
2435 if( !n->is_Mach() ) continue; // Skip non-machine nodes
2436 MachNode *mach = n->as_Mach();
2437 int iop = mach->ideal_Opcode();
2438 if( iop == Op_CreateEx ) continue; // CreateEx is pinned
2439 if( iop == Op_Con ) continue; // Do not schedule Top
2440 if( iop == Op_Node && // Do not schedule PhiNodes, ProjNodes
2441 mach->pipeline() == MachNode::pipeline_class() &&
2442 !n->is_SpillCopy() ) // Breakpoints, Prolog, etc
2443 continue;
2444 break; // Funny loop structure to be sure...
2445 }
2446 // Compute last "interesting" instruction in block - last instruction we
2447 // might schedule. _bb_end points just after last schedulable inst. We
2448 // normally schedule conditional branches (despite them being forced last
2449 // in the block), because they have delay slots we can fill. Calls all
2450 // have their delay slots filled in the template expansions, so we don't
2451 // bother scheduling them.
2452 Node *last = bb->_nodes[_bb_end];
2453 // Ignore trailing NOPs.
2454 while (_bb_end > 0 && last->is_Mach() &&
2455 last->as_Mach()->ideal_Opcode() == Op_Con) {
2456 last = bb->_nodes[--_bb_end];
2457 }
2458 assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
2459 if( last->is_Catch() ||
2460 // Exclude unreachable path case when Halt node is in a separate block.
2461 (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
2462 // There must be a prior call. Skip it.
2463 while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
2464 assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
2465 }
2466 } else if( last->is_MachNullCheck() ) {
2467 // Backup so the last null-checked memory instruction is
2468 // outside the schedulable range. Skip over the nullcheck,
2469 // projection, and the memory nodes.
2470 Node *mem = last->in(1);
2471 do {
2472 _bb_end--;
2473 } while (mem != bb->_nodes[_bb_end]);
2474 } else {
2475 // Set _bb_end to point after last schedulable inst.
2476 _bb_end++;
2477 }
2478
2479 assert( _bb_start <= _bb_end, "inverted block ends" );
2480
2481 // Compute the register antidependencies for the basic block
2482 ComputeRegisterAntidependencies(bb);
2483 if (_cfg->C->failing()) return; // too many D-U pinch points
2484
2485 // Compute intra-bb latencies for the nodes
2486 ComputeLocalLatenciesForward(bb);
2487
2488 // Compute the usage within the block, and set the list of all nodes
2489 // in the block that have no uses within the block.
2490 ComputeUseCount(bb);
2491
2492 // Schedule the remaining instructions in the block
2493 while ( _available.size() > 0 ) {
2494 Node *n = ChooseNodeToBundle();
2495 guarantee(n != NULL, "no nodes available");
2496 AddNodeToBundle(n,bb);
2497 }
2498
2499 assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
2500 #ifdef ASSERT
2501 for( uint l = _bb_start; l < _bb_end; l++ ) {
2502 Node *n = bb->_nodes[l];
2503 uint m;
2504 for( m = 0; m < _bb_end-_bb_start; m++ )
2505 if( _scheduled[m] == n )
2506 break;
2507 assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
2508 }
2509 #endif
2510
2511 // Now copy the instructions (in reverse order) back to the block
2512 for ( uint k = _bb_start; k < _bb_end; k++ )
2513 bb->_nodes.map(k, _scheduled[_bb_end-k-1]);
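// _scheduled was filled walking the block backwards, so index
// _bb_end-k-1 restores forward order; e.g. with _bb_start == 1 and
// _bb_end == 4, slots 1, 2, 3 receive _scheduled[2], [1], [0].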
2514
2515 #ifndef PRODUCT
2516 if (_cfg->C->trace_opto_output()) {
2517 tty->print("# Schedule BB#%03d (final)\n", i);
2518 uint current = 0;
2519 for (uint j = 0; j < bb->_nodes.size(); j++) {
2520 Node *n = bb->_nodes[j];
2521 if( valid_bundle_info(n) ) {
2522 Bundle *bundle = node_bundling(n);
2523 if (bundle->instr_count() > 0 || bundle->flags() > 0) {
2524 tty->print("*** Bundle: ");
2525 bundle->dump();
2526 }
2527 n->dump();
2528 }
2529 }
2530 }
2531 #endif
2532 #ifdef ASSERT
2533 verify_good_schedule(bb,"after block local scheduling");
2534 #endif
2535 }
2536
2537 #ifndef PRODUCT
2538 if (_cfg->C->trace_opto_output())
2539 tty->print("# <- DoScheduling\n");
2540 #endif
2562 Node *prior_use = _reg_node[def];
2563 if( prior_use && !edge_from_to(prior_use,n) ) {
2564 tty->print("%s = ",OptoReg::as_VMReg(def)->name());
2565 n->dump();
2566 tty->print_cr("...");
2567 prior_use->dump();
2568 assert(edge_from_to(prior_use,n),msg);
2569 }
2570 _reg_node.map(def,NULL); // Kill live USEs
2571 }
2572 }
2573
2574 void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
2575
2576 // Zap to something reasonable for the verify code
2577 _reg_node.clear();
2578
2579 // Walk over the block backwards. Check to make sure each DEF doesn't
2580 // kill a live value (other than the one it's supposed to). Add each
2581 // USE to the live set.
2582 for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
2583 Node *n = b->_nodes[i];
2584 int n_op = n->Opcode();
2585 if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
2586 // Fat-proj kills a slew of registers
2587 RegMask rm = n->out_RegMask();// Make local copy
2588 while( rm.is_NotEmpty() ) {
2589 OptoReg::Name kill = rm.find_first_elem();
2590 rm.Remove(kill);
2591 verify_do_def( n, kill, msg );
2592 }
2593 } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes
2594 // Get DEF'd registers the normal way
2595 verify_do_def( n, _regalloc->get_reg_first(n), msg );
2596 verify_do_def( n, _regalloc->get_reg_second(n), msg );
2597 }
2598
2599 // Now make all USEs live
2600 for( uint i=1; i<n->req(); i++ ) {
2601 Node *def = n->in(i);
2602 assert(def != 0, "input edge required");
2603 OptoReg::Name reg_lo = _regalloc->get_reg_first(def);
2694 }
2695 }
2696 }
2697
2698 // Add edge from kill to pinch-point
2699 add_prec_edge_from_to(kill,pinch);
2700 }
2701
2702 void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
2703 if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
2704 return;
2705 Node *pinch = _reg_node[use_reg]; // Get pinch point
2706 // Check for no later def_reg/kill in block
2707 if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
2708 // Use has to be block-local as well
2709 _cfg->get_block_for_node(use) == b) {
2710 if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
2711 pinch->req() == 1 ) { // pinch not yet in block?
2712 pinch->del_req(0); // yank pointer to later-def, also set flag
2713 // Insert the pinch-point in the block just after the last use
2714 b->_nodes.insert(b->find_node(use)+1,pinch);
2715 _bb_end++; // Increase size scheduled region in block
2716 }
2717
2718 add_prec_edge_from_to(pinch,use);
2719 }
2720 }
2721
2722 // We insert antidependences between the reads and following write of
2723 // allocated registers to prevent illegal code motion. Hopefully, the
2724 // number of added references should be fairly small, especially as we
2725 // are only adding references within the current basic block.
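// Illustrative: if one instruction reads r5 and a later instruction
// redefines r5, the reader must not be scheduled after the writer; a
// precedence edge (possibly through a pinch point, below) forbids that
// reordering.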
2726 void Scheduling::ComputeRegisterAntidependencies(Block *b) {
2727
2728 #ifdef ASSERT
2729 verify_good_schedule(b,"before block local scheduling");
2730 #endif
2731
2732 // A valid schedule, for each register independently, is an endless cycle
2733 // of: a def, then some uses (connected to the def by true dependencies),
2734 // then some kills (defs with no uses), finally the cycle repeats with a new
2746
2747 // For each DEF/KILL, we check to see if there's a prior DEF/KILL for this
2748 // register. If not, we record the DEF/KILL in _reg_node, the
2749 // register-to-def mapping. If there is a prior DEF/KILL, we insert a
2750 // "pinch point", a new Node that's in the graph but not in the block.
2751 // We put edges from the prior and current DEF/KILLs to the pinch point.
2752 // We put the pinch point in _reg_node. If there's already a pinch point
2753 // we merely add an edge from the current DEF/KILL to the pinch point.
2754
2755 // After doing the DEF/KILLs, we handle USEs. For each used register, we
2756 // put an edge from the pinch point to the USE.
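// Edge-count sketch: for m DEF/KILLs and n USEs of one register, the pinch
// point needs only m + n precedence edges (each DEF/KILL -> pinch, pinch ->
// each USE) instead of the m * n pairwise DEF/KILL -> USE edges.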
2757
2758 // To be expedient, the _reg_node array is pre-allocated for the whole
2759 // compilation. _reg_node is lazily initialized; it either contains a NULL,
2760 // or a valid def/kill/pinch-point, or a leftover node from some prior
2761 // block. A leftover node from some prior block is treated like a NULL (no
2762 // prior def, so no anti-dependence is needed). A valid def is distinguished
2763 // by being in the current block.
2764 bool fat_proj_seen = false;
2765 uint last_safept = _bb_end-1;
2766 Node* end_node = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
2767 Node* last_safept_node = end_node;
2768 for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
2769 Node *n = b->_nodes[i];
2770 int is_def = n->outcnt(); // a def if it has uses, sampled before precedence edges are added
2771 if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
2772 // Fat-proj kills a slew of registers
2773 // This can add edges to 'n' and obscure whether or not it was a def,
2774 // hence the is_def flag.
2775 fat_proj_seen = true;
2776 RegMask rm = n->out_RegMask();// Make local copy
2777 while( rm.is_NotEmpty() ) {
2778 OptoReg::Name kill = rm.find_first_elem();
2779 rm.Remove(kill);
2780 anti_do_def( b, n, kill, is_def );
2781 }
2782 } else {
2783 // Get DEF'd registers the normal way
2784 anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
2785 anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
2786 }
2787
2788 // Kill projections on a branch should appear to occur on the
2789 // branch, not afterwards, so grab the masks from the projections
2798 rm.Remove(kill);
2799 anti_do_def( b, n, kill, false );
2800 }
2801 }
2802 }
2803 }
2804
2805 // Check each register used by this instruction for a following DEF/KILL
2806 // that must occur afterward and requires an anti-dependence edge.
2807 for( uint j=0; j<n->req(); j++ ) {
2808 Node *def = n->in(j);
2809 if( def ) {
2810 assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
2811 anti_do_use( b, n, _regalloc->get_reg_first(def) );
2812 anti_do_use( b, n, _regalloc->get_reg_second(def) );
2813 }
2814 }
2815 // Do not allow defs of new derived values to float above GC
2816 // points unless the base is definitely available at the GC point.
2817
2818 Node *m = b->_nodes[i];
2819
2820 // Add precedence edge from following safepoint to use of derived pointer
2821 if( last_safept_node != end_node &&
2822 m != last_safept_node) {
2823 for (uint k = 1; k < m->req(); k++) {
2824 const Type *t = m->in(k)->bottom_type();
2825 if( t->isa_oop_ptr() &&
2826 t->is_ptr()->offset() != 0 ) {
2827 last_safept_node->add_prec( m );
2828 break;
2829 }
2830 }
2831 }
2832
2833 if( n->jvms() ) { // Precedence edge from derived to safept
2834 // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
2835 if( b->_nodes[last_safept] != last_safept_node ) {
2836 last_safept = b->find_node(last_safept_node);
2837 }
2838 for( uint j=last_safept; j > i; j-- ) {
2839 Node *mach = b->_nodes[j];
2840 if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
2841 mach->add_prec( n );
2842 }
2843 last_safept = i;
2844 last_safept_node = m;
2845 }
2846 }
2847
2848 if (fat_proj_seen) {
2849 // Garbage collect pinch nodes that were not consumed.
2850 // They are usually created by a fat kill MachProj for a call.
2851 garbage_collect_pinch_nodes();
2852 }
2853 }
2854
2855 // Garbage collect pinch nodes for reuse by other blocks.
2856 //
2857 // The block scheduler's insertion of anti-dependence
2858 // edges creates many pinch nodes when the block contains
2859 // 2 or more Calls. A pinch node is used to prevent a
2860 // combinatorial explosion of edges.
|
40 #include "opto/subnode.hpp"
41 #include "opto/type.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "utilities/xmlstream.hpp"
44
45 extern uint size_exception_handler();
46 extern uint size_deopt_handler();
47
48 #ifndef PRODUCT
49 #define DEBUG_ARG(x) , x
50 #else
51 #define DEBUG_ARG(x)
52 #endif
53
54 extern int emit_exception_handler(CodeBuffer &cbuf);
55 extern int emit_deopt_handler(CodeBuffer &cbuf);
56
57 // Convert Nodes to instruction bits and pass off to the VM
58 void Compile::Output() {
59 // RootNode goes
60 assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
61
62 // The number of new nodes (mostly MachNop) is proportional to
63 // the number of java calls and inner loops which are aligned.
64 if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
65 C->inner_loops()*(OptoLoopAlignment-1)),
66 "out of nodes before code generation" ) ) {
67 return;
68 }
69 // Make sure I can find the Start Node
70 Block *entry = _cfg->get_block(1);
71 Block *broot = _cfg->get_root_block();
72
73 const StartNode *start = entry->head()->as_Start();
74
75 // Replace StartNode with prolog
76 MachPrologNode *prolog = new (this) MachPrologNode();
77 entry->map_node(prolog, 0);
78 _cfg->map_node_to_block(prolog, entry);
79 _cfg->unmap_node_from_block(start); // start is no longer in any block
80
81 // Virtual methods need an unverified entry point
82
83 if( is_osr_compilation() ) {
84 if( PoisonOSREntry ) {
85 // TODO: Should use a ShouldNotReachHereNode...
86 _cfg->insert( broot, 0, new (this) MachBreakpointNode() );
87 }
88 } else {
89 if( _method && !_method->flags().is_static() ) {
90 // Insert unvalidated entry point
91 _cfg->insert( broot, 0, new (this) MachUEPNode() );
92 }
93
94 }
95
96
97 // Break before main entry point
127 # endif
128
129 uint* blk_starts = NEW_RESOURCE_ARRAY(uint, _cfg->number_of_blocks() + 1);
130 blk_starts[0] = 0;
131
132 // Initialize code buffer and process short branches.
133 CodeBuffer* cb = init_buffer(blk_starts);
134
135 if (cb == NULL || failing()) {
136 return;
137 }
138
139 ScheduleAndBundle();
140
141 #ifndef PRODUCT
142 if (trace_opto_output()) {
143 tty->print("\n---- After ScheduleAndBundle ----\n");
144 for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
145 tty->print("\nBB#%03d:\n", i);
146 Block* block = _cfg->get_block(i);
147 for (uint j = 0; j < block->number_of_nodes(); j++) {
148 Node* n = block->get_node(j);
149 OptoReg::Name reg = _regalloc->get_reg_first(n);
150 tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
151 n->dump();
152 }
153 }
154 }
155 #endif
156
157 if (failing()) {
158 return;
159 }
160
161 BuildOopMaps();
162
163 if (failing()) {
164 return;
165 }
166
167 fill_buffer(cb, blk_starts);
168 }
209 else if ( CompileZapFirst > CompiledZap_count() ) skip = true;
210 else if ( CompileZapFirst == CompiledZap_count() )
211 warning("starting zap compilation after skipping");
212
213 if ( CompileZapLast == -1 ) ; // nothing special
214 else if ( CompileZapLast < CompiledZap_count() ) skip = true;
215 else if ( CompileZapLast == CompiledZap_count() )
216 warning("about to compile last zap");
217
218 ++_CompiledZap_count; // counts skipped zaps, too
219
220 if ( skip ) return;
221
222
223 if ( _method == NULL )
224 return; // no safepoints/oopmaps emitted for calls in stubs,so we don't care
225
226 // Insert call to zap runtime stub before every node with an oop map
227 for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
228 Block *b = _cfg->get_block(i);
229 for ( uint j = 0; j < b->number_of_nodes(); ++j ) {
230 Node *n = b->get_node(j);
231
232 // Determining if we should insert a zap-a-lot node in output.
233 // We do that for all nodes that has oopmap info, except for calls
234 // to allocation. Calls to allocation passes in the old top-of-eden pointer
235 // and expect the C code to reset it. Hence, there can be no safepoints between
236 // the inlined-allocation and the call to new_Java, etc.
237 // We also cannot zap monitor calls, as they must hold the microlock
238 // during the call to Zap, which also wants to grab the microlock.
239 bool insert = n->is_MachSafePoint() && (n->as_MachSafePoint()->oop_map() != NULL);
240 if ( insert ) { // it is MachSafePoint
241 if ( !n->is_MachCall() ) {
242 insert = false;
243 } else if ( n->is_MachCall() ) {
244 MachCallNode* call = n->as_MachCall();
245 if (call->entry_point() == OptoRuntime::new_instance_Java() ||
246 call->entry_point() == OptoRuntime::new_array_Java() ||
247 call->entry_point() == OptoRuntime::multianewarray2_Java() ||
248 call->entry_point() == OptoRuntime::multianewarray3_Java() ||
249 call->entry_point() == OptoRuntime::multianewarray4_Java() ||
250 call->entry_point() == OptoRuntime::multianewarray5_Java() ||
251 call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
252 call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
253 ) {
254 insert = false;
255 }
256 }
257 if (insert) {
258 Node *zap = call_zap_node(n->as_MachSafePoint(), i);
259 b->insert_node(zap, j);
260 _cfg->map_node_to_block(zap, b);
261 ++j;
262 }
263 }
264 }
265 }
266 }
267
268
269 Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
270 const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
271 CallStaticJavaNode* ideal_node =
272 new (this) CallStaticJavaNode( tf,
273 OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
274 "call zap dead locals stub", 0, TypePtr::BOTTOM);
275 // We need to copy the OopMap from the site we're zapping at.
276 // We have to make a copy, because the zap site might not be
277 // a call site, and zap_dead is a call site.
278 OopMap* clone = node_to_check->oop_map()->deep_copy();
279
362 // third inserts nops where needed.
363
364 // Step one, perform a pessimistic sizing pass.
365 uint last_call_adr = max_uint;
366 uint last_avoid_back_to_back_adr = max_uint;
367 uint nop_size = (new (this) MachNopNode())->size(_regalloc);
368 for (uint i = 0; i < nblocks; i++) { // For all blocks
369 Block* block = _cfg->get_block(i);
370
371 // During short branch replacement, we store the relative (to blk_starts)
372 // offset of jump in jmp_offset, rather than the absolute offset of jump.
373 // This is so that we do not need to recompute sizes of all nodes when
374 // we compute correct blk_starts in our next sizing pass.
375 jmp_offset[i] = 0;
376 jmp_size[i] = 0;
377 jmp_nidx[i] = -1;
378 DEBUG_ONLY( jmp_target[i] = 0; )
379 DEBUG_ONLY( jmp_rule[i] = 0; )
380
381 // Sum all instruction sizes to compute block size
382 uint last_inst = block->number_of_nodes();
383 uint blk_size = 0;
384 for (uint j = 0; j < last_inst; j++) {
385 Node* nj = block->get_node(j);
386 // Handle machine instruction nodes
387 if (nj->is_Mach()) {
388 MachNode *mach = nj->as_Mach();
389 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
390 reloc_size += mach->reloc();
391 if (mach->is_MachCall()) {
392 MachCallNode *mcall = mach->as_MachCall();
393 // This destination address is NOT PC-relative
394
395 mcall->method_set((intptr_t)mcall->entry_point());
396
397 if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
398 stub_size += CompiledStaticCall::to_interp_stub_size();
399 reloc_size += CompiledStaticCall::reloc_to_interp_stub();
400 }
401 } else if (mach->is_MachSafePoint()) {
402 // If call/safepoint are adjacent, account for possible
403 // nop to disambiguate the two safepoints.
404 // ScheduleAndBundle() can rearrange nodes in a block,
405 // check for all offsets inside this block.
460 last_avoid_back_to_back_adr += max_loop_pad;
461 }
462 blk_size += max_loop_pad;
463 }
464 }
465
466 // Save block size; update total method size
467 blk_starts[i+1] = blk_starts[i]+blk_size;
468 }
469
470 // Step two, replace eligible long jumps.
471 bool progress = true;
472 uint last_may_be_short_branch_adr = max_uint;
473 while (has_short_branch_candidate && progress) {
474 progress = false;
475 has_short_branch_candidate = false;
476 int adjust_block_start = 0;
477 for (uint i = 0; i < nblocks; i++) {
478 Block* block = _cfg->get_block(i);
479 int idx = jmp_nidx[i];
480 MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
481 if (mach != NULL && mach->may_be_short_branch()) {
482 #ifdef ASSERT
483 assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
484 int j;
485 // Find the branch; ignore trailing NOPs.
486 for (j = block->number_of_nodes()-1; j>=0; j--) {
487 Node* n = block->get_node(j);
488 if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
489 break;
490 }
491 assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
492 #endif
493 int br_size = jmp_size[i];
494 int br_offs = blk_starts[i] + jmp_offset[i];
495
496 // This requires the TRUE branch target be in succs[0]
497 uint bnum = block->non_connector_successor(0)->_pre_order;
498 int offset = blk_starts[bnum] - br_offs;
499 if (bnum > i) { // adjust following block's offset
500 offset -= adjust_block_start;
501 }
502 // In the following code a nop could be inserted before
503 // the branch which will increase the backward distance.
504 bool needs_padding = ((uint)br_offs == last_may_be_short_branch_adr);
505 if (needs_padding && offset <= 0)
506 offset -= nop_size;
507
508 if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
509 // We've got a winner. Replace this branch.
510 MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
511
512 // Update the jmp_size.
513 int new_size = replacement->size(_regalloc);
514 int diff = br_size - new_size;
515 assert(diff >= (int)nop_size, "short_branch size should be smaller");
516 // Conservatively take into accound padding between
517 // avoid_back_to_back branches. Previous branch could be
518 // converted into avoid_back_to_back branch during next
519 // rounds.
520 if (needs_padding && replacement->avoid_back_to_back()) {
521 jmp_offset[i] += nop_size;
522 diff -= nop_size;
523 }
524 adjust_block_start += diff;
525 block->map_node(replacement, idx);
526 mach->subsume_by(replacement, C);
527 mach = replacement;
528 progress = true;
529
530 jmp_size[i] = new_size;
531 DEBUG_ONLY( jmp_target[i] = bnum; );
532 DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
533 } else {
534 // The jump distance is not short, try again during next iteration.
535 has_short_branch_candidate = true;
536 }
537 } // (mach->may_be_short_branch())
538 if (mach != NULL && (mach->may_be_short_branch() ||
539 mach->avoid_back_to_back())) {
540 last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
541 }
542 blk_starts[i+1] -= adjust_block_start;
543 }
544 }
545
1071 // The usual spill mechanism can only generate stfd's in this case, which
1072 // doesn't work if the fp reg to spill contains a single-precision denorm.
1073 // Instead, we hack around the normal spill mechanism using stfspill's and
1074 // ldffill's in the MachProlog and MachEpilog emit methods. We allocate
1075 // space here for the fp arg regs (f8-f15) we're going to thusly spill.
1076 //
1077 // If we ever implement 16-byte 'registers' == stack slots, we can
1078 // get rid of this hack and have SpillCopy generate stfspill/ldffill
1079 // instead of stfd/stfs/ldfd/ldfs.
1080 _frame_slots += 8*(16/BytesPerInt);
1081 }
1082 #endif
1083 assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");
1084
1085 if (has_mach_constant_base_node()) {
1086 // Fill the constant table.
1087 // Note: This must happen before shorten_branches.
1088 for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
1089 Block* b = _cfg->get_block(i);
1090
1091 for (uint j = 0; j < b->number_of_nodes(); j++) {
1092 Node* n = b->get_node(j);
1093
1094 // If the node is a MachConstantNode evaluate the constant
1095 // value section.
1096 if (n->is_MachConstant()) {
1097 MachConstantNode* machcon = n->as_MachConstant();
1098 machcon->eval_constant(C);
1099 }
1100 }
1101 }
1102
1103 // Calculate the offsets of the constants and the size of the
1104 // constant table (including the padding to the next section).
1105 constant_table().calculate_offsets_and_size();
1106 const_req = constant_table().size();
1107 }
1108
1109 // Initialize the space for the BufferBlob used to find and verify
1110 // instruction size in MachNode::emit_size()
1111 init_scratch_buffer_blob(const_req);
1112 if (failing()) return NULL; // Out of memory
1230 if (Pipeline::requires_bundling() && starts_bundle(head)) {
1231 cb->flush_bundle(true);
1232 }
1233
1234 #ifdef ASSERT
1235 if (!block->is_connector()) {
1236 stringStream st;
1237 block->dump_head(_cfg, &st);
1238 MacroAssembler(cb).block_comment(st.as_string());
1239 }
1240 jmp_target[i] = 0;
1241 jmp_offset[i] = 0;
1242 jmp_size[i] = 0;
1243 jmp_rule[i] = 0;
1244 #endif
1245 int blk_offset = current_offset;
1246
1247 // Define the label at the beginning of the basic block
1248 MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
1249
1250 uint last_inst = block->number_of_nodes();
1251
1252 // Emit block normally, except for last instruction.
1253 // Emit means "dump code bits into code buffer".
1254 for (uint j = 0; j<last_inst; j++) {
1255
1256 // Get the node
1257 Node* n = block->get_node(j);
1258
1259 // See if delay slots are supported
1260 if (valid_bundle_info(n) &&
1261 node_bundling(n)->used_in_unconditional_delay()) {
1262 assert(delay_slot == NULL, "no use of delay slot node");
1263 assert(n->size(_regalloc) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
1264
1265 delay_slot = n;
1266 continue;
1267 }
1268
1269 // If this starts a new instruction group, then flush the current one
1270 // (but allow split bundles)
1271 if (Pipeline::requires_bundling() && starts_bundle(n))
1272 cb->flush_bundle(false);
1273
1274 // The following logic is duplicated in the code ifdeffed for
1275 // ENABLE_ZAP_DEAD_LOCALS which appears above in this file. It
1276 // should be factored out. Or maybe dispersed to the nodes?
1277
1291 // A padding may be needed again since a previous instruction
1292 // could be moved to delay slot.
1293
1294 // align the instruction if necessary
1295 int padding = mach->compute_padding(current_offset);
1296 // Make sure safepoint node for polling is distinct from a call's
1297 // return by adding a nop if needed.
1298 if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
1299 padding = nop_size;
1300 }
1301 if (padding == 0 && mach->avoid_back_to_back() &&
1302 current_offset == last_avoid_back_to_back_offset) {
1303 // Avoid back to back some instructions.
1304 padding = nop_size;
1305 }
1306
1307 if(padding > 0) {
1308 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1309 int nops_cnt = padding / nop_size;
1310 MachNode *nop = new (this) MachNopNode(nops_cnt);
1311 block->insert_node(nop, j++);
1312 last_inst++;
1313 _cfg->map_node_to_block(nop, block);
1314 nop->emit(*cb, _regalloc);
1315 cb->flush_bundle(true);
1316 current_offset = cb->insts_size();
1317 }
1318
1319 // Remember the start of the last call in a basic block
1320 if (is_mcall) {
1321 MachCallNode *mcall = mach->as_MachCall();
1322
1323 // This destination address is NOT PC-relative
1324 mcall->method_set((intptr_t)mcall->entry_point());
1325
1326 // Save the return address
1327 call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
1328
1329 if (mcall->is_MachCallLeaf()) {
1330 is_mcall = false;
1331 is_sfn = false;
1377 // finalized yet, adjust distance by the difference
1378 // between calculated and final offsets of current block.
1379 offset -= (blk_starts[i] - blk_offset);
1380 }
1381 // In the following code a nop could be inserted before
1382 // the branch which will increase the backward distance.
1383 bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
1384 if (needs_padding && offset <= 0)
1385 offset -= nop_size;
1386
1387 if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
1388 // We've got a winner. Replace this branch.
1389 MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
1390
1391 // Update the jmp_size.
1392 int new_size = replacement->size(_regalloc);
1393 assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
1394 // Insert padding between avoid_back_to_back branches.
1395 if (needs_padding && replacement->avoid_back_to_back()) {
1396 MachNode *nop = new (this) MachNopNode();
1397 block->insert_node(nop, j++);
1398 _cfg->map_node_to_block(nop, block);
1399 last_inst++;
1400 nop->emit(*cb, _regalloc);
1401 cb->flush_bundle(true);
1402 current_offset = cb->insts_size();
1403 }
1404 #ifdef ASSERT
1405 jmp_target[i] = block_num;
1406 jmp_offset[i] = current_offset - blk_offset;
1407 jmp_size[i] = new_size;
1408 jmp_rule[i] = mach->rule();
1409 #endif
1410 block->map_node(replacement, j);
1411 mach->subsume_by(replacement, C);
1412 n = replacement;
1413 mach = replacement;
1414 }
1415 }
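// Bind the branch (original or short replacement) to its target block's label.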
1416 mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
1417 } else if (mach->ideal_Opcode() == Op_Jump) {
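// A Jump implements a multi-way branch (jump table): bind each
// JumpProj case to the label of the block it targets.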
1418 for (uint h = 0; h < block->_num_succs; h++) {
1419 Block* succs_block = block->_succs[h];
1420 for (uint j = 1; j < succs_block->num_preds(); j++) {
1421 Node* jpn = succs_block->pred(j);
1422 if (jpn->is_JumpProj() && jpn->in(0) == mach) {
1423 uint block_num = succs_block->non_connector()->_pre_order;
1424 Label *blkLabel = &blk_labels[block_num];
1425 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1426 }
1427 }
1428 }
1429 }
1430 #ifdef ASSERT
1431 // Check that oop-store precedes the card-mark
1432 else if (mach->ideal_Opcode() == Op_StoreCM) {
1433 uint storeCM_idx = j;
1434 int count = 0;
1435 for (uint prec = mach->req(); prec < mach->len(); prec++) {
1436 Node *oop_store = mach->in(prec); // Precedence edge
1437 if (oop_store == NULL) continue;
1438 count++;
1439 uint i4;
1440 for (i4 = 0; i4 < last_inst; ++i4) {
1441 if (block->get_node(i4) == oop_store) {
1442 break;
1443 }
1444 }
1445 // Note: This test can provide a false failure if other precedence
1446 // edges have been added to the StoreCM node.
1447 assert(i4 == last_inst || i4 < storeCM_idx, "oop-store must precede card-mark");
1448 }
1449 assert(count > 0, "storeCM expects at least one precedence edge");
1450 }
1451 #endif
1452 else if (!n->is_Proj()) {
1453 // Remember the beginning of the previous instruction, in case
1454 // it's followed by a flag-kill and a null-check. Happens on
1455 // Intel all the time, with add-to-memory kind of opcodes.
1456 previous_offset = current_offset;
1457 }
1458 }
1459
1460 // Make sure there is sufficient space remaining, expanding the buffer if necessary
1461 cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1531 // Generate an OopMap entry
1532 Process_OopMap_Node(mach, adjusted_offset);
1533 }
1534
1535 // Insert the delay slot instruction
1536 delay_slot->emit(*cb, _regalloc);
1537
1538 // Don't reuse it
1539 delay_slot = NULL;
1540 }
1541
1542 } // End for all instructions in block
1543
1544 // If the next block is the top of a loop, pad this block out to align
1545 // the loop top a little. Helps prevent pipe stalls at loop back branches.
1546 if (i < nblocks-1) {
1547 Block *nb = _cfg->get_block(i + 1);
1548 int padding = nb->alignment_padding(current_offset);
1549 if( padding > 0 ) {
1550 MachNode *nop = new (this) MachNopNode(padding / nop_size);
1551 block->insert_node(nop, block->number_of_nodes());
1552 _cfg->map_node_to_block(nop, block);
1553 nop->emit(*cb, _regalloc);
1554 current_offset = cb->insts_size();
1555 }
1556 }
1557 // Verify that the distances computed earlier for forward
1558 // short branches are still valid.
1559 guarantee((int)(blk_starts[i+1] - blk_starts[i]) >= (current_offset - blk_offset), "shouldn't increase block size");
1560
1561 // Save new block start offset
1562 blk_starts[i] = blk_offset;
1563 } // End of for all blocks
1564 blk_starts[nblocks] = current_offset;
1565
1566 non_safepoints.flush_at_end();
1567
1568 // Offset too large?
1569 if (failing()) return;
1570
1571 // Define a pseudo-label at the end of the code
1638 dump_asm(node_offsets, node_offset_limit);
1639 if (xtty != NULL) {
1640 xtty->tail("opto_assembly");
1641 }
1642 }
1643 }
1644 #endif
1645
1646 }
1647
1648 void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
1649 _inc_table.set_size(cnt);
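// _inc_table is the implicit-exception table: it records the PC of a
// potentially trapping instruction (e.g. an implicit null check) and the
// offset where execution continues.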
1650
1651 uint inct_cnt = 0;
1652 for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
1653 Block* block = _cfg->get_block(i);
1654 Node *n = NULL;
1655 int j;
1656
1657 // Find the branch; ignore trailing NOPs.
1658 for (j = block->number_of_nodes() - 1; j >= 0; j--) {
1659 n = block->get_node(j);
1660 if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
1661 break;
1662 }
1663 }
1664
1665 // If we didn't find anything, continue
1666 if (j < 0) {
1667 continue;
1668 }
1669
1670 // Compute ExceptionHandlerTable subtable entry and add it
1671 // (skip empty blocks)
1672 if (n->is_Catch()) {
1673
1674 // Get the offset of the return from the call
1675 uint call_return = call_returns[block->_pre_order];
1676 #ifdef ASSERT
1677 assert( call_return > 0, "no call seen for this basic block" );
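// Walk back over the call's MachProj results to verify the Catch really follows a call.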
1678 while (block->get_node(--j)->is_MachProj()) ;
1679 assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
1680 #endif
1681 // The last instruction is a CatchNode; find its CatchProjNodes.
1682 int nof_succs = block->_num_succs;
1683 // allocate space
1684 GrowableArray<intptr_t> handler_bcis(nof_succs);
1685 GrowableArray<intptr_t> handler_pcos(nof_succs);
1686 // iterate through all successors
1687 for (int j = 0; j < nof_succs; j++) {
1688 Block* s = block->_succs[j];
1689 bool found_p = false;
1690 for (uint k = 1; k < s->num_preds(); k++) {
1691 Node* pk = s->pred(k);
1692 if (pk->is_CatchProj() && pk->in(0) == n) {
1693 const CatchProjNode* p = pk->as_CatchProj();
1694 found_p = true;
1695 // add the corresponding handler bci & pco information
1696 if (p->_con != CatchProjNode::fall_through_index) {
1697 // p leads to an exception handler (and is not fall through)
1698 assert(s == _cfg->get_block(s->_pre_order), "bad numbering");
1699 // no duplicates, please
1765 // This one is persistent within the Compile class
1766 _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);
1767
1768 // Allocate space for fixed-size arrays
1769 _node_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
1770 _uses = NEW_ARENA_ARRAY(arena, short, node_max);
1771 _current_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
1772
1773 // Clear the arrays
1774 memset(_node_bundling_base, 0, node_max * sizeof(Bundle));
1775 memset(_node_latency, 0, node_max * sizeof(unsigned short));
1776 memset(_uses, 0, node_max * sizeof(short));
1777 memset(_current_latency, 0, node_max * sizeof(unsigned short));
1778
1779 // Reset the bundling information to its initial, elaborated state
1780 memcpy(_bundle_use_elements, Pipeline_Use::elaborated_elements, sizeof(Pipeline_Use::elaborated_elements));
1781
1782 // Get the last node
1783 Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
1784
1785 _next_node = block->get_node(block->number_of_nodes() - 1);
1786 }
1787
1788 #ifndef PRODUCT
1789 // Scheduling destructor
1790 Scheduling::~Scheduling() {
1791 _total_branches += _branches;
1792 _total_unconditional_delays += _unconditional_delays;
1793 }
1794 #endif
1795
1796 // Step ahead "i" cycles
1797 void Scheduling::step(uint i) {
1798
1799 Bundle *bundle = node_bundling(_next_node);
1800 bundle->set_starts_bundle();
1801
1802 // Update the bundle record, but leave the flags information alone
1803 if (_bundle_instr_count > 0) {
1804 bundle->set_instr_count(_bundle_instr_count);
1805 bundle->set_resources_used(_bundle_use.resourcesUsed());
1858 scheduling.DoScheduling();
1859 }
1860
1861 // Compute the latency of all the instructions. This is fairly simple,
1862 // because we already have a legal ordering. Walk over the instructions
1863 // from first to last, and compute the latency of the instruction based
1864 // on the latency of the preceding instruction(s).
1865 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
1866 #ifndef PRODUCT
1867 if (_cfg->C->trace_opto_output())
1868 tty->print("# -> ComputeLocalLatenciesForward\n");
1869 #endif
1870
1871 // Walk over all the schedulable instructions
1872 for( uint j=_bb_start; j < _bb_end; j++ ) {
1873
1874 // This is a kludge, forcing all latency calculations to start at 1.
1875 // It is used to allow a latency of 0 to force an instruction to the
1876 // beginning of the bb.
1877 uint latency = 1;
1878 Node *use = bb->get_node(j);
1879 uint nlen = use->len();
1880
1881 // Walk over all the inputs
1882 for ( uint k=0; k < nlen; k++ ) {
1883 Node *def = use->in(k);
1884 if (!def)
1885 continue;
1886
1887 uint l = _node_latency[def->_idx] + use->latency(k);
1888 if (latency < l)
1889 latency = l;
1890 }
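// The node's start latency is the maximum over all inputs of the
// input's latency plus the edge latency (and at least 1).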
1891
1892 _node_latency[use->_idx] = latency;
1893
1894 #ifndef PRODUCT
1895 if (_cfg->C->trace_opto_output()) {
1896 tty->print("# latency %4d: ", latency);
1897 use->dump();
1898 }
2269
2270 // Increment the number of instructions in this bundle
2271 _bundle_instr_count += instruction_count;
2272
2273 // Remember this node for later
2274 if (n->is_Mach())
2275 _next_node = n;
2276 }
2277
2278 // It's possible to have a BoxLock in the graph and in the _bbs mapping but
2279 // not in the bb->_nodes array. This happens for debug-info-only BoxLocks.
2280 // 'Schedule' them (basically ignore in the schedule) but do not insert them
2281 // into the block. All other scheduled nodes get put in the schedule here.
2282 int op = n->Opcode();
2283 if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR
2284 (op != Op_Node && // Not an unused antidependence node and
2285 // not an unallocated BoxLock
2286 (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
2287
2288 // Push any trailing projections
2289 if( bb->get_node(bb->number_of_nodes()-1) != n ) {
2290 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2291 Node *foi = n->fast_out(i);
2292 if( foi->is_Proj() )
2293 _scheduled.push(foi);
2294 }
2295 }
2296
2297 // Put the instruction in the schedule list
2298 _scheduled.push(n);
2299 }
2300
2301 #ifndef PRODUCT
2302 if (_cfg->C->trace_opto_output())
2303 dump_available();
2304 #endif
2305
2306 // Walk all the definitions, decrementing use counts, and
2307 // if a definition has a 0 use count, place it in the available list.
2308 DecrementUseCounts(n,bb);
2309 }
2312 // uses outside the current basic block. As we are doing a backwards walk,
2313 // any node we reach that has a use count of 0 may be scheduled. This also
2314 // avoids the problem of cyclic references from phi nodes, as long as phi
2315 // nodes are at the front of the basic block. This method also initializes
2316 // the available list to the set of instructions that have no uses within this
2317 // basic block.
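// A node becomes available for scheduling once its block-local use count
// drops to zero during the backwards walk.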
2318 void Scheduling::ComputeUseCount(const Block *bb) {
2319 #ifndef PRODUCT
2320 if (_cfg->C->trace_opto_output())
2321 tty->print("# -> ComputeUseCount\n");
2322 #endif
2323
2324 // Clear the list of available and scheduled instructions, just in case
2325 _available.clear();
2326 _scheduled.clear();
2327
2328 // No delay slot specified
2329 _unconditional_delay_slot = NULL;
2330
2331 #ifdef ASSERT
2332 for( uint i=0; i < bb->number_of_nodes(); i++ )
2333 assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
2334 #endif
2335
2336 // Force the _uses count to never go to zero for unschedulable pieces
2337 // of the block.
2338 for( uint k = 0; k < _bb_start; k++ )
2339 _uses[bb->get_node(k)->_idx] = 1;
2340 for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
2341 _uses[bb->get_node(l)->_idx] = 1;
2342
2343 // Iterate backwards over the instructions in the block. Don't count the
2344 // branch projections at end or the block header instructions.
2345 for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
2346 Node *n = bb->get_node(j);
2347 if( n->is_Proj() ) continue; // Projections handled another way
2348
2349 // Account for all uses
2350 for ( uint k = 0; k < n->len(); k++ ) {
2351 Node *inp = n->in(k);
2352 if (!inp) continue;
2353 assert(inp != n, "no cycles allowed" );
2354 if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
2355 if (inp->is_Proj()) { // Skip through Proj's
2356 inp = inp->in(0);
2357 }
2358 ++_uses[inp->_idx]; // Count 1 block-local use
2359 }
2360 }
2361
2362 // If this instruction has a 0 use count, then it is available
2363 if (!_uses[n->_idx]) {
2364 _current_latency[n->_idx] = _bundle_cycle_number;
2365 AddNodeToAvailableList(n);
2366 }
2381
2382 // This routine performs scheduling on each basic block in reverse order,
2383 // using instruction latencies and taking into account function unit
2384 // availability.
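// Blocks are visited in reverse so that bundling state can carry across a
// block boundary when the next block is this block's sole successor.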
2385 void Scheduling::DoScheduling() {
2386 #ifndef PRODUCT
2387 if (_cfg->C->trace_opto_output())
2388 tty->print("# -> DoScheduling\n");
2389 #endif
2390
2391 Block *succ_bb = NULL;
2392 Block *bb;
2393
2394 // Walk over all the basic blocks in reverse order
2395 for (int i = _cfg->number_of_blocks() - 1; i >= 0; succ_bb = bb, i--) {
2396 bb = _cfg->get_block(i);
2397
2398 #ifndef PRODUCT
2399 if (_cfg->C->trace_opto_output()) {
2400 tty->print("# Schedule BB#%03d (initial)\n", i);
2401 for (uint j = 0; j < bb->number_of_nodes(); j++) {
2402 bb->get_node(j)->dump();
2403 }
2404 }
2405 #endif
2406
2407 // Skip the root block; there is nothing to schedule in it
2408 if (bb == _cfg->get_root_block()) {
2409 continue;
2410 }
2411
2412 // Skip empty, connector blocks
2413 if (bb->is_connector())
2414 continue;
2415
2416 // If the following block is not the sole successor of
2417 // this one, then reset the pipeline information
2418 if (bb->_num_succs != 1 || bb->non_connector_successor(0) != succ_bb) {
2419 #ifndef PRODUCT
2420 if (_cfg->C->trace_opto_output()) {
2421 tty->print("*** bundle start of next BB, node %d, for %d instructions\n",
2422 _next_node->_idx, _bundle_instr_count);
2423 }
2424 #endif
2425 step_and_clear();
2426 }
2427
2428 // Leave untouched the starting instruction, any Phis, a CreateEx node
2429 // or Top. bb->get_node(_bb_start) is the first schedulable instruction.
2430 _bb_end = bb->number_of_nodes()-1;
2431 for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
2432 Node *n = bb->get_node(_bb_start);
2433 // Things not matched, like PhiNodes and ProjNodes, don't get scheduled.
2434 // Also, MachIdealNodes do not get scheduled
2435 if( !n->is_Mach() ) continue; // Skip non-machine nodes
2436 MachNode *mach = n->as_Mach();
2437 int iop = mach->ideal_Opcode();
2438 if( iop == Op_CreateEx ) continue; // CreateEx is pinned
2439 if( iop == Op_Con ) continue; // Do not schedule Top
2440 if( iop == Op_Node && // Do not schedule PhiNodes, ProjNodes
2441 mach->pipeline() == MachNode::pipeline_class() &&
2442 !n->is_SpillCopy() ) // Breakpoints, Prolog, etc
2443 continue;
2444 break; // Funny loop structure to be sure...
2445 }
2446 // Compute last "interesting" instruction in block - last instruction we
2447 // might schedule. _bb_end points just after last schedulable inst. We
2448 // normally schedule conditional branches (despite them being forced last
2449 // in the block), because they have delay slots we can fill. Calls all
2450 // have their delay slots filled in the template expansions, so we don't
2451 // bother scheduling them.
2452 Node *last = bb->get_node(_bb_end);
2453 // Ignore trailing NOPs.
2454 while (_bb_end > 0 && last->is_Mach() &&
2455 last->as_Mach()->ideal_Opcode() == Op_Con) {
2456 last = bb->get_node(--_bb_end);
2457 }
2458 assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
2459 if( last->is_Catch() ||
2460 // Exclude unreachable path case when Halt node is in a separate block.
2461 (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
2462 // There must be a prior call. Skip it.
2463 while( !bb->get_node(--_bb_end)->is_MachCall() ) {
2464 assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
2465 }
2466 } else if( last->is_MachNullCheck() ) {
2467 // Back up so the last null-checked memory instruction is
2468 // outside the schedulable range. Skip over the nullcheck,
2469 // projection, and the memory nodes.
2470 Node *mem = last->in(1);
2471 do {
2472 _bb_end--;
2473 } while (mem != bb->get_node(_bb_end));
2474 } else {
2475 // Set _bb_end to point after last schedulable inst.
2476 _bb_end++;
2477 }
2478
2479 assert( _bb_start <= _bb_end, "inverted block ends" );
2480
2481 // Compute the register antidependencies for the basic block
2482 ComputeRegisterAntidependencies(bb);
2483 if (_cfg->C->failing()) return; // too many D-U pinch points
2484
2485 // Compute intra-bb latencies for the nodes
2486 ComputeLocalLatenciesForward(bb);
2487
2488 // Compute the usage within the block, and set the list of all nodes
2489 // in the block that have no uses within the block.
2490 ComputeUseCount(bb);
2491
2492 // Schedule the remaining instructions in the block
2493 while ( _available.size() > 0 ) {
2494 Node *n = ChooseNodeToBundle();
2495 guarantee(n != NULL, "no nodes available");
2496 AddNodeToBundle(n,bb);
2497 }
2498
2499 assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
2500 #ifdef ASSERT
2501 for( uint l = _bb_start; l < _bb_end; l++ ) {
2502 Node *n = bb->get_node(l);
2503 uint m;
2504 for( m = 0; m < _bb_end-_bb_start; m++ )
2505 if( _scheduled[m] == n )
2506 break;
2507 assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
2508 }
2509 #endif
2510
2511 // Now copy the instructions (in reverse order) back to the block
2512 for ( uint k = _bb_start; k < _bb_end; k++ )
2513 bb->map_node(_scheduled[_bb_end-k-1], k);
2514
2515 #ifndef PRODUCT
2516 if (_cfg->C->trace_opto_output()) {
2517 tty->print("# Schedule BB#%03d (final)\n", i);
2518 uint current = 0;
2519 for (uint j = 0; j < bb->number_of_nodes(); j++) {
2520 Node *n = bb->get_node(j);
2521 if( valid_bundle_info(n) ) {
2522 Bundle *bundle = node_bundling(n);
2523 if (bundle->instr_count() > 0 || bundle->flags() > 0) {
2524 tty->print("*** Bundle: ");
2525 bundle->dump();
2526 }
2527 n->dump();
2528 }
2529 }
2530 }
2531 #endif
2532 #ifdef ASSERT
2533 verify_good_schedule(bb,"after block local scheduling");
2534 #endif
2535 }
2536
2537 #ifndef PRODUCT
2538 if (_cfg->C->trace_opto_output())
2539 tty->print("# <- DoScheduling\n");
2540 #endif
2562 Node *prior_use = _reg_node[def];
2563 if( prior_use && !edge_from_to(prior_use,n) ) {
2564 tty->print("%s = ",OptoReg::as_VMReg(def)->name());
2565 n->dump();
2566 tty->print_cr("...");
2567 prior_use->dump();
2568 assert(edge_from_to(prior_use,n),msg);
2569 }
2570 _reg_node.map(def,NULL); // Kill live USEs
2571 }
2572 }
2573
2574 void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
2575
2576 // Zap to something reasonable for the verify code
2577 _reg_node.clear();
2578
2579 // Walk over the block backwards. Check to make sure each DEF doesn't
2580 // kill a live value (other than the one it's supposed to). Add each
2581 // USE to the live set.
2582 for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
2583 Node *n = b->get_node(i);
2584 int n_op = n->Opcode();
2585 if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
2586 // Fat-proj kills a slew of registers
2587 RegMask rm = n->out_RegMask();// Make local copy
2588 while( rm.is_NotEmpty() ) {
2589 OptoReg::Name kill = rm.find_first_elem();
2590 rm.Remove(kill);
2591 verify_do_def( n, kill, msg );
2592 }
2593 } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes
2594 // Get DEF'd registers the normal way
2595 verify_do_def( n, _regalloc->get_reg_first(n), msg );
2596 verify_do_def( n, _regalloc->get_reg_second(n), msg );
2597 }
2598
2599 // Now make all USEs live
2600 for( uint i=1; i<n->req(); i++ ) {
2601 Node *def = n->in(i);
2602 assert(def != 0, "input edge required");
2603 OptoReg::Name reg_lo = _regalloc->get_reg_first(def);
2694 }
2695 }
2696 }
2697
2698 // Add edge from kill to pinch-point
2699 add_prec_edge_from_to(kill,pinch);
2700 }
2701
2702 void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
2703 if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
2704 return;
2705 Node *pinch = _reg_node[use_reg]; // Get pinch point
2706 // Check for no later def_reg/kill in block
2707 if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
2708 // Use has to be block-local as well
2709 _cfg->get_block_for_node(use) == b) {
2710 if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
2711 pinch->req() == 1 ) { // pinch not yet in block?
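// An optimistic pinch point still holds a pointer to the later def;
// removing it turns the pinch into a real one placed in the block.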
2712 pinch->del_req(0); // yank pointer to later-def, also set flag
2713 // Insert the pinch-point in the block just after the last use
2714 b->insert_node(pinch, b->find_node(use) + 1);
2715 _bb_end++; // Increase the size of the scheduled region in the block
2716 }
2717
2718 add_prec_edge_from_to(pinch,use);
2719 }
2720 }
2721
2722 // We insert antidependences between the reads and a following write of
2723 // allocated registers to prevent illegal code motion. The number of
2724 // added edges should be fairly small, especially as we only add edges
2725 // within the current basic block.
2726 void Scheduling::ComputeRegisterAntidependencies(Block *b) {
2727
2728 #ifdef ASSERT
2729 verify_good_schedule(b,"before block local scheduling");
2730 #endif
2731
2732 // A valid schedule, for each register independently, is an endless cycle
2733 // of: a def, then some uses (connected to the def by true dependencies),
2734 // then some kills (defs with no uses), finally the cycle repeats with a new
2746
2747 // For each DEF/KILL, we check to see if there's a prior DEF/KILL for this
2748 // register. If not, we record the DEF/KILL in _reg_node, the
2749 // register-to-def mapping. If there is a prior DEF/KILL, we insert a
2750 // "pinch point", a new Node that's in the graph but not in the block.
2751 // We put edges from the prior and current DEF/KILLs to the pinch point.
2752 // We put the pinch point in _reg_node. If there's already a pinch point
2753 // we merely add an edge from the current DEF/KILL to the pinch point.
2754
2755 // After doing the DEF/KILLs, we handle USEs. For each used register, we
2756 // put an edge from the pinch point to the USE.
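// For example, a USE(r1) followed by a KILL(r1) in the same block yields
// edges KILL->pinch and pinch->USE, preventing the kill from being
// scheduled ahead of the use.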
2757
2758 // To be expedient, the _reg_node array is pre-allocated for the whole
2759 // compilation. _reg_node is lazily initialized; it either contains a NULL,
2760 // or a valid def/kill/pinch-point, or a leftover node from some prior
2761 // block. A leftover node from a prior block is treated like a NULL (no
2762 // prior def, so no anti-dependence is needed). A valid def is
2763 // distinguished by being in the current block.
2764 bool fat_proj_seen = false;
2765 uint last_safept = _bb_end-1;
2766 Node* end_node = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
2767 Node* last_safept_node = end_node;
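// Track the most recent safepoint seen in the backwards walk; derived
// pointers between safepoints are pinned to it below.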
2768 for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
2769 Node *n = b->get_node(i);
2770 int is_def = n->outcnt(); // a def if it has uses prior to adding precedence edges
2771 if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
2772 // Fat-proj kills a slew of registers
2773 // This can add edges to 'n' and obscure whether or not it was a def,
2774 // hence the is_def flag.
2775 fat_proj_seen = true;
2776 RegMask rm = n->out_RegMask();// Make local copy
2777 while( rm.is_NotEmpty() ) {
2778 OptoReg::Name kill = rm.find_first_elem();
2779 rm.Remove(kill);
2780 anti_do_def( b, n, kill, is_def );
2781 }
2782 } else {
2783 // Get DEF'd registers the normal way
2784 anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
2785 anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
2786 }
2787
2788 // Kill projections on a branch should appear to occur on the
2789 // branch, not afterwards, so grab the masks from the projections
2798 rm.Remove(kill);
2799 anti_do_def( b, n, kill, false );
2800 }
2801 }
2802 }
2803 }
2804
2805 // Check each register used by this instruction for a later DEF/KILL
2806 // that requires an anti-dependence edge.
2807 for( uint j=0; j<n->req(); j++ ) {
2808 Node *def = n->in(j);
2809 if( def ) {
2810 assert( !def->is_MachProj() || def->ideal_reg() != MachProjNode::fat_proj, "" );
2811 anti_do_use( b, n, _regalloc->get_reg_first(def) );
2812 anti_do_use( b, n, _regalloc->get_reg_second(def) );
2813 }
2814 }
2815 // Do not allow defs of new derived values to float above GC
2816 // points unless the base is definitely available at the GC point.
2817
2818 Node *m = b->get_node(i);
2819
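// A derived pointer is recognized as an oop-typed input with a
// non-zero offset from its base.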
2820 // Add precedence edge from following safepoint to use of derived pointer
2821 if( last_safept_node != end_node &&
2822 m != last_safept_node) {
2823 for (uint k = 1; k < m->req(); k++) {
2824 const Type *t = m->in(k)->bottom_type();
2825 if( t->isa_oop_ptr() &&
2826 t->is_ptr()->offset() != 0 ) {
2827 last_safept_node->add_prec( m );
2828 break;
2829 }
2830 }
2831 }
2832
2833 if( n->jvms() ) { // Precedence edge from derived to safept
2834 // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
2835 if( b->get_node(last_safept) != last_safept_node ) {
2836 last_safept = b->find_node(last_safept_node);
2837 }
2838 for( uint j=last_safept; j > i; j-- ) {
2839 Node *mach = b->get_node(j);
2840 if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
2841 mach->add_prec( n );
2842 }
2843 last_safept = i;
2844 last_safept_node = m;
2845 }
2846 }
2847
2848 if (fat_proj_seen) {
2849 // Garbage collect pinch nodes that were not consumed.
2850 // They are usually created by a fat kill MachProj for a call.
2851 garbage_collect_pinch_nodes();
2852 }
2853 }
2854
2855 // Garbage collect pinch nodes for reuse by other blocks.
2856 //
2857 // The block scheduler's insertion of anti-dependence
2858 // edges creates many pinch nodes when the block contains
2859 // 2 or more Calls. A pinch node is used to prevent a