// --- Fragment: interior of the branch-shortening loop (the enclosing
// function and loop header begin above this chunk). Decides whether the
// long-form branch 'mach' can be replaced by its short-form encoding,
// based on the distance to its target block.
// bnum: pre-order number of this branch's single non-connector successor,
// i.e. the branch's target block.
509 uint bnum = block->non_connector_successor(0)->_pre_order;
// Signed distance from the branch (at br_offs) to the target block start.
510 int offset = blk_starts[bnum] - br_offs;
// Forward branch: the target block's start offset is not finalized yet in
// this pass, so subtract the shrinkage accumulated so far.
511 if (bnum > i) { // adjust following block's offset
512 offset -= adjust_block_start;
513 }
514
515 // This block can be a loop header, account for the padding
516 // in the previous block.
517 int block_padding = block_worst_case_pad[i];
518 assert(i == 0 || block_padding == 0 || br_offs >= block_padding, "Should have at least a padding on top");
519 // In the following code a nop could be inserted before
520 // the branch which will increase the backward distance.
// needs_padding: this branch sits right after the last instruction that may
// need back-to-back separation, so a nop may later be inserted before it.
521 bool needs_padding = ((uint)(br_offs - block_padding) == last_may_be_short_branch_adr);
522 assert(!needs_padding || jmp_offset[i] == 0, "padding only branches at the beginning of block");
523
// A backward (or zero) distance would grow by nop_size if that nop is
// inserted; account for it conservatively before the reach check.
524 if (needs_padding && offset <= 0)
525 offset -= nop_size;
526
// Ask the matcher whether the short encoding of this branch rule can
// reach 'offset'.
527 if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
528 // We've got a winner. Replace this branch.
529 MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
530
531 // Update the jmp_size.
532 int new_size = replacement->size(_regalloc);
// diff: bytes saved by the shorter encoding (asserted to be >= one nop).
533 int diff = br_size - new_size;
534 assert(diff >= (int)nop_size, "short_branch size should be smaller");
535 // Conservatively take into account padding between
536 // avoid_back_to_back branches. Previous branch could be
537 // converted into avoid_back_to_back branch during next
538 // rounds.
539 if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
540 jmp_offset[i] += nop_size;
541 diff -= nop_size;
542 }
// Accumulate total shrinkage; used above to adjust forward distances.
543 adjust_block_start += diff;
// Splice the short-form node into the block and subsume the long form.
544 block->map_node(replacement, idx);
545 mach->subsume_by(replacement, C);
546 mach = replacement;
// Sizes changed: another shortening round may find more candidates.
547 progress = true;
548
549 jmp_size[i] = new_size;
1157
// --- Tail of a code-buffer setup routine (its definition begins above
// this chunk); returns the initialized CodeBuffer, or NULL on failure. ---
// Method-handle invokes need an extra deopt handler, so reserve space
// for it on top of the base requirement.
1158 if (has_method_handle_invokes())
1159 total_req += deopt_handler_req; // deopt MH handler
1160
1161 CodeBuffer* cb = code_buffer();
1162 cb->initialize(total_req, locs_req);
1163
1164 // Have we run out of code space?
// A NULL blob means the underlying allocation failed; in either case the
// compilation is abandoned with a bailout, not an error.
1165 if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1166 C->record_failure("CodeCache is full");
1167 CompileBroker::handle_full_code_cache();
1168 return NULL;
1169 }
1170 // Configure the code buffer.
// Carve out the sections for constants, stubs, and oop bookkeeping.
1171 cb->initialize_consts_size(const_req);
1172 cb->initialize_stubs_size(stub_req);
1173 cb->initialize_oop_recorder(env()->oop_recorder());
1174
1175 // fill in the nop array for bundling computations
1176 MachNode *_nop_list[Bundle::_nop_count];
1177 Bundle::initialize_nops(_nop_list, this);
1178
1179 return cb;
1180 }
1181
1182 //------------------------------fill_buffer------------------------------------
// Emits the final machine code into 'cb'. blk_starts[] holds the per-block
// offsets computed by the earlier branch-shortening pass; emission below must
// not exceed them. (Definition continues beyond this chunk.)
1183 void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
1184 // blk_starts[] contains offsets calculated during short branches processing,
1185 // offsets should not be increased during following steps.
1186
1187 // Compute the size of first NumberOfLoopInstrToAlign instructions at head
1188 // of a loop. It is used to determine the padding for loop alignment.
1189 compute_loop_first_inst_sizes();
1190
1191 // Create oopmap set.
1192 _oop_map_set = new OopMapSet();
1193
1194 // !!!!! This preserves old handling of oopmaps for now
1195 debug_info()->set_oopmaps(_oop_map_set);
1196
// Number of basic blocks to emit.
1197 uint nblocks = _cfg->number_of_blocks();
// --- Fragment: interior of fill_buffer's per-node emission loop (the
// enclosing loop begins above this chunk). Last-chance conversion of a
// long branch to its short form at actual emission time.
// A branch occupying an unconditional delay slot cannot be resized here.
1391 bool delay_slot_is_used = valid_bundle_info(n) &&
1392 node_bundling(n)->use_unconditional_delay();
1393 if (!delay_slot_is_used && mach->may_be_short_branch()) {
1394 assert(delay_slot == NULL, "not expecting delay slot node");
1395 int br_size = n->size(_regalloc);
// Signed distance from the current emission point to the target block.
1396 int offset = blk_starts[block_num] - current_offset;
1397 if (block_num >= i) {
1398 // Current and following block's offset are not
1399 // finalized yet, adjust distance by the difference
1400 // between calculated and final offsets of current block.
1401 offset -= (blk_starts[i] - blk_offset);
1402 }
1403 // In the following code a nop could be inserted before
1404 // the branch which will increase the backward distance.
1405 bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
// Backward (or zero) distance grows if that nop is inserted; be conservative.
1406 if (needs_padding && offset <= 0)
1407 offset -= nop_size;
1408
// Does the short encoding of this branch rule reach 'offset'?
1409 if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
1410 // We've got a winner. Replace this branch.
1411 MachNode* replacement = mach->as_MachBranch()->short_branch_version(this);
1412
1413 // Update the jmp_size.
1414 int new_size = replacement->size(_regalloc);
1415 assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
1416 // Insert padding between avoid_back_to_back branches.
// Unlike the sizing pass, emission is under way, so the separating nop is
// materialized and emitted into the buffer immediately.
1417 if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
1418 MachNode *nop = new MachNopNode();
1419 block->insert_node(nop, j++);
1420 _cfg->map_node_to_block(nop, block);
1421 last_inst++;
1422 nop->emit(*cb, _regalloc);
1423 cb->flush_bundle(true);
1424 current_offset = cb->insts_size();
1425 }
// Debug-only bookkeeping used to verify branch targets/sizes later.
1426 #ifdef ASSERT
1427 jmp_target[i] = block_num;
1428 jmp_offset[i] = current_offset - blk_offset;
1429 jmp_size[i] = new_size;
1430 jmp_rule[i] = mach->rule();
1431 #endif
|
// --- Fragment: interior of the branch-shortening loop (the enclosing
// function and loop header begin above this chunk). Decides whether the
// long-form branch 'mach' can be replaced by its short-form encoding,
// based on the distance to its target block. (This variant's
// short_branch_version() takes no argument.)
// bnum: pre-order number of this branch's single non-connector successor,
// i.e. the branch's target block.
509 uint bnum = block->non_connector_successor(0)->_pre_order;
// Signed distance from the branch (at br_offs) to the target block start.
510 int offset = blk_starts[bnum] - br_offs;
// Forward branch: the target block's start offset is not finalized yet in
// this pass, so subtract the shrinkage accumulated so far.
511 if (bnum > i) { // adjust following block's offset
512 offset -= adjust_block_start;
513 }
514
515 // This block can be a loop header, account for the padding
516 // in the previous block.
517 int block_padding = block_worst_case_pad[i];
518 assert(i == 0 || block_padding == 0 || br_offs >= block_padding, "Should have at least a padding on top");
519 // In the following code a nop could be inserted before
520 // the branch which will increase the backward distance.
// needs_padding: this branch sits right after the last instruction that may
// need back-to-back separation, so a nop may later be inserted before it.
521 bool needs_padding = ((uint)(br_offs - block_padding) == last_may_be_short_branch_adr);
522 assert(!needs_padding || jmp_offset[i] == 0, "padding only branches at the beginning of block");
523
// A backward (or zero) distance would grow by nop_size if that nop is
// inserted; account for it conservatively before the reach check.
524 if (needs_padding && offset <= 0)
525 offset -= nop_size;
526
// Ask the matcher whether the short encoding of this branch rule can
// reach 'offset'.
527 if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
528 // We've got a winner. Replace this branch.
529 MachNode* replacement = mach->as_MachBranch()->short_branch_version();
530
531 // Update the jmp_size.
532 int new_size = replacement->size(_regalloc);
// diff: bytes saved by the shorter encoding (asserted to be >= one nop).
533 int diff = br_size - new_size;
534 assert(diff >= (int)nop_size, "short_branch size should be smaller");
535 // Conservatively take into account padding between
536 // avoid_back_to_back branches. Previous branch could be
537 // converted into avoid_back_to_back branch during next
538 // rounds.
539 if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
540 jmp_offset[i] += nop_size;
541 diff -= nop_size;
542 }
// Accumulate total shrinkage; used above to adjust forward distances.
543 adjust_block_start += diff;
// Splice the short-form node into the block and subsume the long form.
544 block->map_node(replacement, idx);
545 mach->subsume_by(replacement, C);
546 mach = replacement;
// Sizes changed: another shortening round may find more candidates.
547 progress = true;
548
549 jmp_size[i] = new_size;
1157
// --- Tail of a code-buffer setup routine (its definition begins above
// this chunk); returns the initialized CodeBuffer, or NULL on failure. ---
// Method-handle invokes need an extra deopt handler, so reserve space
// for it on top of the base requirement.
1158 if (has_method_handle_invokes())
1159 total_req += deopt_handler_req; // deopt MH handler
1160
1161 CodeBuffer* cb = code_buffer();
1162 cb->initialize(total_req, locs_req);
1163
1164 // Have we run out of code space?
// A NULL blob means the underlying allocation failed; in either case the
// compilation is abandoned with a bailout, not an error.
1165 if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
1166 C->record_failure("CodeCache is full");
1167 CompileBroker::handle_full_code_cache();
1168 return NULL;
1169 }
1170 // Configure the code buffer.
// Carve out the sections for constants, stubs, and oop bookkeeping.
1171 cb->initialize_consts_size(const_req);
1172 cb->initialize_stubs_size(stub_req);
1173 cb->initialize_oop_recorder(env()->oop_recorder());
1174
1175 // fill in the nop array for bundling computations
1176 MachNode *_nop_list[Bundle::_nop_count];
1177 Bundle::initialize_nops(_nop_list);
1178
1179 return cb;
1180 }
1181
1182 //------------------------------fill_buffer------------------------------------
// Emits the final machine code into 'cb'. blk_starts[] holds the per-block
// offsets computed by the earlier branch-shortening pass; emission below must
// not exceed them. (Definition continues beyond this chunk.)
1183 void Compile::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
1184 // blk_starts[] contains offsets calculated during short branches processing,
1185 // offsets should not be increased during following steps.
1186
1187 // Compute the size of first NumberOfLoopInstrToAlign instructions at head
1188 // of a loop. It is used to determine the padding for loop alignment.
1189 compute_loop_first_inst_sizes();
1190
1191 // Create oopmap set.
1192 _oop_map_set = new OopMapSet();
1193
1194 // !!!!! This preserves old handling of oopmaps for now
1195 debug_info()->set_oopmaps(_oop_map_set);
1196
// Number of basic blocks to emit.
1197 uint nblocks = _cfg->number_of_blocks();
// --- Fragment: interior of fill_buffer's per-node emission loop (the
// enclosing loop begins above this chunk). Last-chance conversion of a
// long branch to its short form at actual emission time.
// A branch occupying an unconditional delay slot cannot be resized here.
1391 bool delay_slot_is_used = valid_bundle_info(n) &&
1392 node_bundling(n)->use_unconditional_delay();
1393 if (!delay_slot_is_used && mach->may_be_short_branch()) {
1394 assert(delay_slot == NULL, "not expecting delay slot node");
1395 int br_size = n->size(_regalloc);
// Signed distance from the current emission point to the target block.
1396 int offset = blk_starts[block_num] - current_offset;
1397 if (block_num >= i) {
1398 // Current and following block's offset are not
1399 // finalized yet, adjust distance by the difference
1400 // between calculated and final offsets of current block.
1401 offset -= (blk_starts[i] - blk_offset);
1402 }
1403 // In the following code a nop could be inserted before
1404 // the branch which will increase the backward distance.
1405 bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
// Backward (or zero) distance grows if that nop is inserted; be conservative.
1406 if (needs_padding && offset <= 0)
1407 offset -= nop_size;
1408
// Does the short encoding of this branch rule reach 'offset'?
1409 if (_matcher->is_short_branch_offset(mach->rule(), br_size, offset)) {
1410 // We've got a winner. Replace this branch.
1411 MachNode* replacement = mach->as_MachBranch()->short_branch_version();
1412
1413 // Update the jmp_size.
1414 int new_size = replacement->size(_regalloc);
1415 assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
1416 // Insert padding between avoid_back_to_back branches.
// Unlike the sizing pass, emission is under way, so the separating nop is
// materialized and emitted into the buffer immediately.
1417 if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
1418 MachNode *nop = new MachNopNode();
1419 block->insert_node(nop, j++);
1420 _cfg->map_node_to_block(nop, block);
1421 last_inst++;
1422 nop->emit(*cb, _regalloc);
1423 cb->flush_bundle(true);
1424 current_offset = cb->insts_size();
1425 }
// Debug-only bookkeeping used to verify branch targets/sizes later.
1426 #ifdef ASSERT
1427 jmp_target[i] = block_num;
1428 jmp_offset[i] = current_offset - blk_offset;
1429 jmp_size[i] = new_size;
1430 jmp_rule[i] = mach->rule();
1431 #endif
|