1239 }
1240 }
1241 }
1242
1243
1244 void LinearScan::build_intervals() {
1245 TIME_LINEAR_SCAN(timer_build_intervals);
1246
1247 // initialize interval list with expected number of intervals
1248 // (32 is added to have some space for split children without having to resize the list)
1249 _intervals = IntervalList(num_virtual_regs() + 32);
1250 // initialize all slots that are used by build_intervals
1251 _intervals.at_put_grow(num_virtual_regs() - 1, NULL, NULL);
1252
1253 // create a list with all caller-save registers (cpu, fpu, xmm)
1254 // when an instruction is a call, a temp range is created for all these registers
1255 int num_caller_save_registers = 0;
1256 int caller_save_registers[LinearScan::nof_regs];
1257
1258 int i;
1259 for (i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) {
1260 LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
1261 assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1262 assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1263 caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1264 }
1265
1266 // temp ranges for fpu registers are only created when the method has
1267 // virtual fpu operands. Otherwise no allocation for fpu registers is
1268 // perfomed and so the temp ranges would be useless
1269 if (has_fpu_registers()) {
1270 #ifdef X86
1271 if (UseSSE < 2) {
1272 #endif
1273 for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
1274 LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
1275 assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1276 assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1277 caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1278 }
1279 #ifdef X86
3523 if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3524 Interval* interval = interval_at(reg_num(opr));
3525 if (op->id() != -1) {
3526 interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::inputMode);
3527 }
3528
3529 has_error |= check_state(input_state, interval->assigned_reg(), interval->split_parent());
3530 has_error |= check_state(input_state, interval->assigned_regHi(), interval->split_parent());
3531
3532 // When an operand is marked with is_last_use, then the fpu stack allocator
3533 // removes the register from the fpu stack -> the register contains no value
3534 if (opr->is_last_use()) {
3535 state_put(input_state, interval->assigned_reg(), NULL);
3536 state_put(input_state, interval->assigned_regHi(), NULL);
3537 }
3538 }
3539 }
3540
3541 // invalidate all caller save registers at calls
3542 if (visitor.has_call()) {
3543 for (j = 0; j < FrameMap::nof_caller_save_cpu_regs; j++) {
3544 state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
3545 }
3546 for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
3547 state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
3548 }
3549
3550 #ifdef X86
3551 for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
3552 state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
3553 }
3554 #endif
3555 }
3556
3557 // process xhandler before output and temp operands
3558 XHandlers* xhandlers = visitor.all_xhandler();
3559 n = xhandlers->length();
3560 for (int k = 0; k < n; k++) {
3561 process_xhandler(xhandlers->handler_at(k), input_state);
3562 }
3563
5562 return true;
5563 }
5564
5565 }
5566 #endif
5567 return false;
5568 }
5569
5570 void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
5571   // Cache the per-type allocation parameters for the interval about to be allocated.
5572   const BasicType bt = cur->type();
5573   _num_phys_regs = LinearScan::num_physical_regs(bt);
5574   _adjacent_regs = LinearScan::requires_adjacent_regs(bt);
5575
5576   if (!pd_init_regs_for_alloc(cur)) {
5577     // No platform-specific range was selected, so fall back to the generic
5578     // fpu or cpu register range based on the interval's basic type.
5579     bool wants_fpu = (bt == T_FLOAT || bt == T_DOUBLE);
5580     _first_reg = wants_fpu ? pd_first_fpu_reg : pd_first_cpu_reg;
5581     _last_reg  = wants_fpu ? pd_last_fpu_reg  : pd_last_cpu_reg;
5582   }
5583
5584   // The chosen range must lie inside the allocator's physical register set.
5585   assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
5586   assert(0 <= _last_reg && _last_reg < LinearScan::nof_regs, "out of range");
5587 }
5588
5589
5590 bool LinearScanWalker::is_move(LIR_Op* op, Interval* from, Interval* to) {
5591   // Returns true iff op is a virtual-register move from `from` to `to`.
5592   if (op->code() != lir_move) {
5593     return false;
5594   }
5595   assert(op->as_Op1() != NULL, "move must be LIR_Op1");
5596
5597   LIR_Op1* move = (LIR_Op1*)op;
5598   LIR_Opr src = move->in_opr();
5599   LIR_Opr dst = move->result_opr();
5600   if (!src->is_virtual() || !dst->is_virtual()) {
5601     return false;
5602   }
5603   return src->vreg_number() == from->reg_num() && dst->vreg_number() == to->reg_num();
5604 }
5600
5601 // optimization (especially for phi functions of nested loops):
5602 // assign same spill slot to non-intersecting intervals
|
1239 }
1240 }
1241 }
1242
1243
1244 void LinearScan::build_intervals() {
1245 TIME_LINEAR_SCAN(timer_build_intervals);
1246
1247 // initialize interval list with expected number of intervals
1248 // (32 is added to have some space for split children without having to resize the list)
1249 _intervals = IntervalList(num_virtual_regs() + 32);
1250 // initialize all slots that are used by build_intervals
1251 _intervals.at_put_grow(num_virtual_regs() - 1, NULL, NULL);
1252
1253 // create a list with all caller-save registers (cpu, fpu, xmm)
1254 // when an instruction is a call, a temp range is created for all these registers
1255 int num_caller_save_registers = 0;
1256 int caller_save_registers[LinearScan::nof_regs];
1257
1258 int i;
1259 for (i = 0; i < FrameMap::nof_caller_save_cpu_regs - FrameMap::cpu_reg_range_reduction(); i++) {
1260 LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
1261 assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1262 assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1263 caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1264 }
1265
1266 // temp ranges for fpu registers are only created when the method has
1267 // virtual fpu operands. Otherwise no allocation for fpu registers is
1268 // perfomed and so the temp ranges would be useless
1269 if (has_fpu_registers()) {
1270 #ifdef X86
1271 if (UseSSE < 2) {
1272 #endif
1273 for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
1274 LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
1275 assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1276 assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1277 caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1278 }
1279 #ifdef X86
3523 if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3524 Interval* interval = interval_at(reg_num(opr));
3525 if (op->id() != -1) {
3526 interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::inputMode);
3527 }
3528
3529 has_error |= check_state(input_state, interval->assigned_reg(), interval->split_parent());
3530 has_error |= check_state(input_state, interval->assigned_regHi(), interval->split_parent());
3531
3532 // When an operand is marked with is_last_use, then the fpu stack allocator
3533 // removes the register from the fpu stack -> the register contains no value
3534 if (opr->is_last_use()) {
3535 state_put(input_state, interval->assigned_reg(), NULL);
3536 state_put(input_state, interval->assigned_regHi(), NULL);
3537 }
3538 }
3539 }
3540
3541 // invalidate all caller save registers at calls
3542 if (visitor.has_call()) {
3543 for (j = 0; j < FrameMap::nof_caller_save_cpu_regs - FrameMap::cpu_reg_range_reduction(); j++) {
3544 state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
3545 }
3546 for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
3547 state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
3548 }
3549
3550 #ifdef X86
3551 for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
3552 state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
3553 }
3554 #endif
3555 }
3556
3557 // process xhandler before output and temp operands
3558 XHandlers* xhandlers = visitor.all_xhandler();
3559 n = xhandlers->length();
3560 for (int k = 0; k < n; k++) {
3561 process_xhandler(xhandlers->handler_at(k), input_state);
3562 }
3563
5562 return true;
5563 }
5564
5565 }
5566 #endif
5567 return false;
5568 }
5569
5570 void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
5571   // Establish per-type allocation parameters for the current interval.
5572   const BasicType bt = cur->type();
5573   _num_phys_regs = LinearScan::num_physical_regs(bt);
5574   _adjacent_regs = LinearScan::requires_adjacent_regs(bt);
5575
5576   if (!pd_init_regs_for_alloc(cur)) {
5577     // No platform-specific register range applies; select the generic range.
5578     if (bt == T_FLOAT || bt == T_DOUBLE) {
5579       _first_reg = pd_first_fpu_reg;
5580       _last_reg  = pd_last_fpu_reg;
5581     } else {
5582       _first_reg = pd_first_cpu_reg;
5583       // Shrink the cpu range by the reserved registers at its upper end.
5584       _last_reg  = pd_last_cpu_reg - FrameMap::cpu_reg_range_reduction();
5585     }
5586   }
5587
5588   // Sanity: the selected bounds must stay within the physical register set.
5589   assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
5590   assert(0 <= _last_reg && _last_reg < LinearScan::nof_regs, "out of range");
5591 }
5588
5589
5590 bool LinearScanWalker::is_move(LIR_Op* op, Interval* from, Interval* to) {
5591   // A candidate must be a lir_move whose source and result are both virtual
5592   // registers matching the given intervals.
5593   if (op->code() != lir_move) {
5594     return false;
5595   }
5596   assert(op->as_Op1() != NULL, "move must be LIR_Op1");
5597
5598   LIR_Op1* op1 = (LIR_Op1*)op;
5599   LIR_Opr src_opr = op1->in_opr();
5600   LIR_Opr dst_opr = op1->result_opr();
5601
5602   bool both_virtual = src_opr->is_virtual() && dst_opr->is_virtual();
5603   return both_virtual
5604       && src_opr->vreg_number() == from->reg_num()
5605       && dst_opr->vreg_number() == to->reg_num();
5606 }
5600
5601 // optimization (especially for phi functions of nested loops):
5602 // assign same spill slot to non-intersecting intervals
|