          reloc_size += CallStubImpl::reloc_call_trampoline();

          MachCallNode *mcall = mach->as_MachCall();
          // This destination address is NOT PC-relative

          mcall->method_set((intptr_t)mcall->entry_point());
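          // A Java call whose target method is known may later go through
          // a static call stub into the interpreter, so reserve space for
          // that stub and its relocation up front.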
          if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
            stub_size  += CompiledStaticCall::to_interp_stub_size();
            reloc_size += CompiledStaticCall::reloc_to_interp_stub();
          }
        } else if (mach->is_MachSafePoint()) {
          // If call/safepoint are adjacent, account for possible
          // nop to disambiguate the two safepoints.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // check for all offsets inside this block.
          if (last_call_adr >= blk_starts[i]) {
            blk_size += nop_size;
          }
        }
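        // Note on the avoid_back_to_back() argument (my reading of the
        // enum's contract): AVOID_BEFORE means this node must not start
        // where a previous "avoid" instruction ended, so a nop may be
        // needed in front of it; AVOID_AFTER means nothing should be
        // placed flush against this node's end, which is what
        // last_avoid_back_to_back_adr records below.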
        if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
          // Nop is inserted between "avoid back to back" instructions.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // check for all offsets inside this block.
          if (last_avoid_back_to_back_adr >= blk_starts[i]) {
            blk_size += nop_size;
          }
        }
        if (mach->may_be_short_branch()) {
          if (!nj->is_MachBranch()) {
#ifndef PRODUCT
            nj->dump(3);
#endif
            Unimplemented();
          }
          assert(jmp_nidx[i] == -1, "block should have only one branch");
          jmp_offset[i] = blk_size;
          jmp_size[i]   = nj->size(_regalloc);
          jmp_nidx[i]   = j;
          has_short_branch_candidate = true;
        }
      }
      blk_size += nj->size(_regalloc);
      // Remember end of call offset
      if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
        last_call_adr = blk_starts[i] + blk_size;
      }
      // Remember end of avoid_back_to_back offset
      if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
        last_avoid_back_to_back_adr = blk_starts[i] + blk_size;
      }
    }

    // When the next block starts a loop, we may insert pad NOP
    // instructions. Since we cannot know our future alignment,
    // assume the worst.
    if (i < nblocks - 1) {
      Block* nb = _cfg->get_block(i + 1);
      int max_loop_pad = nb->code_alignment() - relocInfo::addr_unit();
      if (max_loop_pad > 0) {
        assert(is_power_of_2(max_loop_pad + relocInfo::addr_unit()), "");
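        // Worked example (illustrative values): if the loop header needs
        // 16-byte alignment and relocInfo::addr_unit() is 1, then
        // max_loop_pad = 16 - 1 = 15, i.e. up to 15 bytes of nops could
        // be inserted ahead of the next block.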
        // Adjust last_call_adr and/or last_avoid_back_to_back_adr.
        // If either is the last instruction in this block, bump by
        // max_loop_pad in lock-step with blk_size, so sizing
        // calculations in subsequent blocks can still conservatively
        // detect that it may be the last instruction in this block.
        if (last_call_adr == blk_starts[i] + blk_size) {
          last_call_adr += max_loop_pad;
        }
      // ...

        // This block can be a loop header, account for the padding
        // in the previous block.
        int block_padding = block_worst_case_pad[i];
        assert(i == 0 || block_padding == 0 || br_offs >= block_padding, "Should have at least a padding on top");
        // In the following code a nop could be inserted before
        // the branch, which will increase the backward distance.
        bool needs_padding = ((uint)(br_offs - block_padding) == last_may_be_short_branch_adr);
        assert(!needs_padding || jmp_offset[i] == 0, "padding only branches at the beginning of block");

        if (needs_padding && offset <= 0)
          offset -= nop_size;
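        // For example, on x86 an unconditional jmp with a rel32
        // displacement is 5 bytes, while the rel8 form is 2 bytes and
        // reaches targets within [-128, 127] bytes; the matcher's
        // is_short_branch_offset() encapsulates this per-platform test.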
        if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
          // We've got a winner. Replace this branch.
          MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);

          // Update the jmp_size.
          int new_size = replacement->size(_regalloc);
          int diff     = br_size - new_size;
          assert(diff >= (int)nop_size, "short_branch size should be smaller");
          // Conservatively take into account padding between
          // avoid_back_to_back branches. The previous branch could be
          // converted into an avoid_back_to_back branch during a later
          // round.
          if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
            jmp_offset[i] += nop_size;
            diff -= nop_size;
          }
          adjust_block_start += diff;
          block->map_node(replacement, idx);
          mach->subsume_by(replacement, C);
          mach = replacement;
          progress = true;

          jmp_size[i] = new_size;
          DEBUG_ONLY( jmp_target[i] = bnum; );
          DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
        } else {
          // The jump distance is not short, try again during next iteration.
          has_short_branch_candidate = true;
        }
      } // (mach->may_be_short_branch())
      if (mach != NULL && (mach->may_be_short_branch() ||
                           mach->avoid_back_to_back(MachNode::AVOID_AFTER))) {
        last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
      }
      blk_starts[i+1] -= adjust_block_start;
    }
  }
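  // Shrinking one branch moves every later block closer, which can bring
  // other branches into short range; the surrounding loop (not shown in
  // this excerpt) repeats while 'progress' is set, so the block sizing
  // converges to a fixed point.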

#ifdef ASSERT
  for (uint i = 0; i < nblocks; i++) { // For all blocks
    if (jmp_target[i] != 0) {
      int br_size = jmp_size[i];
      int offset = blk_starts[jmp_target[i]] - (blk_starts[i] + jmp_offset[i]);
      if (!_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
        tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
      }
      assert(_matcher->is_short_branch_offset(jmp_rule[i], br_size, offset), "Displacement too large for short jmp");
    }
  }
#endif

  // Step 3: compute the offsets of all blocks (done in fill_buffer()).
// ...

        MachNode *mach = n->as_Mach();
        is_mcall = n->is_MachCall();
        bool is_sfn = n->is_MachSafePoint();

        // If this requires all previous instructions be flushed, then do so
        if (is_sfn || is_mcall || mach->alignment_required() != 1) {
          cb->flush_bundle(true);
          current_offset = cb->insts_size();
        }

        // Padding may be needed again, since a previous instruction
        // could have been moved into the delay slot.

        // align the instruction if necessary
        int padding = mach->compute_padding(current_offset);
        // Make sure safepoint node for polling is distinct from a call's
        // return by adding a nop if needed.
        if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
          padding = nop_size;
        }
        if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
            current_offset == last_avoid_back_to_back_offset) {
          // Avoid placing certain instructions back to back.
          padding = nop_size;
        }

        if (padding > 0) {
          assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
          int nops_cnt = padding / nop_size;
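          // E.g. (illustrative): with a 1-byte nop encoding, 3 bytes of
          // required padding give nops_cnt == 3; on a fixed-width ISA with
          // 4-byte nops, 8 bytes of padding give nops_cnt == 2.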
          MachNode *nop = new (this) MachNopNode(nops_cnt);
          block->insert_node(nop, j++);
          last_inst++;
          _cfg->map_node_to_block(nop, block);
          nop->emit(*cb, _regalloc);
          cb->flush_bundle(true);
          current_offset = cb->insts_size();
        }

        // Remember the start of the last call in a basic block
        if (is_mcall) {
          MachCallNode *mcall = mach->as_MachCall();

// ...

          if (block_num >= i) {
            // The current and following blocks' offsets are not
            // finalized yet; adjust the distance by the difference
            // between the calculated and final offsets of the current
            // block.
            offset -= (blk_starts[i] - blk_offset);
          }
          // In the following code a nop could be inserted before
          // the branch, which will increase the backward distance.
          bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
          if (needs_padding && offset <= 0)
            offset -= nop_size;

          if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
            // We've got a winner. Replace this branch.
            MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);

            // Update the jmp_size.
            int new_size = replacement->size(_regalloc);
            assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
            // Insert padding between avoid_back_to_back branches.
            if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
              MachNode *nop = new (this) MachNopNode();
              block->insert_node(nop, j++);
              _cfg->map_node_to_block(nop, block);
              last_inst++;
              nop->emit(*cb, _regalloc);
              cb->flush_bundle(true);
              current_offset = cb->insts_size();
            }
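            // Unlike the earlier sizing pass, which only reserved space
            // for possible padding, this pass runs during actual emission,
            // so the nop above is written straight into the code buffer.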
#ifdef ASSERT
            jmp_target[i] = block_num;
            jmp_offset[i] = current_offset - blk_offset;
            jmp_size[i]   = new_size;
            jmp_rule[i]   = mach->rule();
#endif
            block->map_node(replacement, j);
            mach->subsume_by(replacement, C);
            n    = replacement;
            mach = replacement;
          }
        }

// ...

#ifdef ASSERT
      if (n->size(_regalloc) < (current_offset - instr_offset)) {
        n->dump();
        assert(false, "wrong size of mach node");
      }
#endif
      non_safepoints.observe_instruction(n, current_offset);

      // mcall is the last "call" that can be a safepoint. Record it so
      // we can see if a poll will directly follow it, in which case we
      // will need a pad to make the PcDesc sites unique; see 5010568.
      // This can be slightly inaccurate, but conservative, in the case
      // that the return address is not actually at current_offset.
      // This is a small price to pay.

      if (is_mcall) {
        last_call_offset = current_offset;
      }

      if (n->is_Mach() && n->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
        // Avoid placing certain instructions back to back.
        last_avoid_back_to_back_offset = current_offset;
      }

      // See if this instruction has a delay slot
      if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
        assert(delay_slot != NULL, "expecting delay slot node");

        // Back up 1 instruction
        cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size());
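        // (As I read the bundling scheme, the preceding instruction was
        // emitted with a placeholder filling its delay slot; backing
        // insts_end up by one instruction unit lets the delay-slot node
        // overwrite that placeholder. Delay slots exist on architectures
        // such as SPARC, where the instruction after a branch executes
        // before the branch takes effect.)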

        // Save the offset for the listing
#ifndef PRODUCT
        if (node_offsets && delay_slot->_idx < node_offset_limit)
          node_offsets[delay_slot->_idx] = cb->insts_size();
#endif

        // Support a SafePoint in the delay slot
        if (delay_slot->is_MachSafePoint()) {
          MachNode *mach = delay_slot->as_Mach();