src/share/vm/c1/c1_LinearScan.cpp
Side-by-side diff from the c1-coops change: the old version of the file is shown first, the new version second.


Old version:

1256     }
1257   }
1258 }
1259 
1260 
1261 void LinearScan::build_intervals() {
1262   TIME_LINEAR_SCAN(timer_build_intervals);
1263 
1264   // initialize interval list with expected number of intervals
1265   // (32 is added to have some space for split children without having to resize the list)
1266   _intervals = IntervalList(num_virtual_regs() + 32);
1267   // initialize all slots that are used by build_intervals
1268   _intervals.at_put_grow(num_virtual_regs() - 1, NULL, NULL);
1269 
1270   // create a list with all caller-save registers (cpu, fpu, xmm)
1271   // when an instruction is a call, a temp range is created for all these registers
1272   int num_caller_save_registers = 0;
1273   int caller_save_registers[LinearScan::nof_regs];
1274 
1275   int i;
1276   for (i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) {
1277     LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
1278     assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1279     assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1280     caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1281   }
1282 
1283   // temp ranges for fpu registers are only created when the method has
1284   // virtual fpu operands. Otherwise no allocation for fpu registers is
1285   // performed and so the temp ranges would be useless
1286   if (has_fpu_registers()) {
1287 #ifdef X86
1288     if (UseSSE < 2) {
1289 #endif
1290       for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
1291         LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
1292         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1293         assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1294         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1295       }
1296 #ifdef X86


3540       if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3541         Interval* interval = interval_at(reg_num(opr));
3542         if (op->id() != -1) {
3543           interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::inputMode);
3544         }
3545 
3546         has_error |= check_state(input_state, interval->assigned_reg(),   interval->split_parent());
3547         has_error |= check_state(input_state, interval->assigned_regHi(), interval->split_parent());
3548 
3549         // When an operand is marked with is_last_use, then the fpu stack allocator
3550         // removes the register from the fpu stack -> the register contains no value
3551         if (opr->is_last_use()) {
3552           state_put(input_state, interval->assigned_reg(),   NULL);
3553           state_put(input_state, interval->assigned_regHi(), NULL);
3554         }
3555       }
3556     }
3557 
3558     // invalidate all caller save registers at calls
3559     if (visitor.has_call()) {
3560       for (j = 0; j < FrameMap::nof_caller_save_cpu_regs; j++) {
3561         state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
3562       }
3563       for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
3564         state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
3565       }
3566 
3567 #ifdef X86
3568       for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
3569         state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
3570       }
3571 #endif
3572     }
3573 
3574     // process xhandler before output and temp operands
3575     XHandlers* xhandlers = visitor.all_xhandler();
3576     n = xhandlers->length();
3577     for (int k = 0; k < n; k++) {
3578       process_xhandler(xhandlers->handler_at(k), input_state);
3579     }
3580 


5579       return true;
5580     }
5581 
5582   }
5583 #endif
5584   return false;
5585 }
5586 
5587 void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
5588   BasicType type = cur->type();
5589   _num_phys_regs = LinearScan::num_physical_regs(type);
5590   _adjacent_regs = LinearScan::requires_adjacent_regs(type);
5591 
5592   if (pd_init_regs_for_alloc(cur)) {
5593     // the appropriate register range was selected.
5594   } else if (type == T_FLOAT || type == T_DOUBLE) {
5595     _first_reg = pd_first_fpu_reg;
5596     _last_reg = pd_last_fpu_reg;
5597   } else {
5598     _first_reg = pd_first_cpu_reg;
5599     _last_reg = pd_last_cpu_reg;
5600   }
5601 
5602   assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
5603   assert(0 <= _last_reg && _last_reg < LinearScan::nof_regs, "out of range");
5604 }
5605 
5606 
5607 bool LinearScanWalker::is_move(LIR_Op* op, Interval* from, Interval* to) {
5608   if (op->code() != lir_move) {
5609     return false;
5610   }
5611   assert(op->as_Op1() != NULL, "move must be LIR_Op1");
5612 
5613   LIR_Opr in = ((LIR_Op1*)op)->in_opr();
5614   LIR_Opr res = ((LIR_Op1*)op)->result_opr();
5615   return in->is_virtual() && res->is_virtual() && in->vreg_number() == from->reg_num() && res->vreg_number() == to->reg_num();
5616 }
5617 
5618 // optimization (especially for phi functions of nested loops):
5619 // assign same spill slot to non-intersecting intervals



New version:

1256     }
1257   }
1258 }
1259 
1260 
1261 void LinearScan::build_intervals() {
1262   TIME_LINEAR_SCAN(timer_build_intervals);
1263 
1264   // initialize interval list with expected number of intervals
1265   // (32 is added to have some space for split children without having to resize the list)
1266   _intervals = IntervalList(num_virtual_regs() + 32);
1267   // initialize all slots that are used by build_intervals
1268   _intervals.at_put_grow(num_virtual_regs() - 1, NULL, NULL);
1269 
1270   // create a list with all caller-save registers (cpu, fpu, xmm)
1271   // when an instruction is a call, a temp range is created for all these registers
1272   int num_caller_save_registers = 0;
1273   int caller_save_registers[LinearScan::nof_regs];
1274 
1275   int i;
1276   for (i = 0; i < FrameMap::nof_caller_save_cpu_regs(); i++) {
1277     LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
1278     assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1279     assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1280     caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1281   }
1282 
1283   // temp ranges for fpu registers are only created when the method has
1284   // virtual fpu operands. Otherwise no allocation for fpu registers is
1285   // performed and so the temp ranges would be useless
1286   if (has_fpu_registers()) {
1287 #ifdef X86
1288     if (UseSSE < 2) {
1289 #endif
1290       for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
1291         LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
1292         assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
1293         assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
1294         caller_save_registers[num_caller_save_registers++] = reg_num(opr);
1295       }
1296 #ifdef X86
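
The hunk above only collects the caller-save register numbers. Later in build_intervals (outside the lines shown here) that list is consumed at every operation that is a call; the fragment below is a sketch of that step, assuming the add_temp helper and the op_id, noUse and T_ILLEGAL names used elsewhere in this file, none of which appear in this hunk.

      // sketch (assumed, not part of this hunk): at each call, add a short temp range
      // for every collected caller-save register, so no live interval is kept in a
      // register the callee may destroy
      if (visitor.has_call()) {
        for (int k = 0; k < num_caller_save_registers; k++) {
          add_temp(caller_save_registers[k], op_id, noUse, T_ILLEGAL);
        }
      }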


3540       if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
3541         Interval* interval = interval_at(reg_num(opr));
3542         if (op->id() != -1) {
3543           interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::inputMode);
3544         }
3545 
3546         has_error |= check_state(input_state, interval->assigned_reg(),   interval->split_parent());
3547         has_error |= check_state(input_state, interval->assigned_regHi(), interval->split_parent());
3548 
3549         // When an operand is marked with is_last_use, then the fpu stack allocator
3550         // removes the register from the fpu stack -> the register contains no value
3551         if (opr->is_last_use()) {
3552           state_put(input_state, interval->assigned_reg(),   NULL);
3553           state_put(input_state, interval->assigned_regHi(), NULL);
3554         }
3555       }
3556     }
3557 
3558     // invalidate all caller save registers at calls
3559     if (visitor.has_call()) {
3560       for (j = 0; j < FrameMap::nof_caller_save_cpu_regs(); j++) {
3561         state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
3562       }
3563       for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
3564         state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
3565       }
3566 
3567 #ifdef X86
3568       for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
3569         state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
3570       }
3571 #endif
3572     }
3573 
3574     // process xhandler before output and temp operands
3575     XHandlers* xhandlers = visitor.all_xhandler();
3576     n = xhandlers->length();
3577     for (int k = 0; k < n; k++) {
3578       process_xhandler(xhandlers->handler_at(k), input_state);
3579     }
3580 
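
As a minimal standalone model of the verification logic in this hunk (hypothetical code, not taken from HotSpot): the verifier records, for each physical register, the interval that is supposed to live there; reading a register checks that expectation, and a call clears every caller-save entry, so a later read through a stale caller-save register is flagged as an error.

#include <cstdio>
#include <vector>

struct Interval { int reg_num; };

// report an error if register `reg` should hold `expected` but the state disagrees
static bool check_state(std::vector<Interval*>& state, int reg, Interval* expected) {
  if (expected == nullptr || reg < 0 || reg >= (int)state.size()) return false;
  if (state[reg] != expected) {
    std::printf("error: register %d does not contain interval %d\n", reg, expected->reg_num);
    return true;
  }
  return false;
}

// record (or clear, with nullptr) the interval currently assigned to register `reg`
static void state_put(std::vector<Interval*>& state, int reg, Interval* interval) {
  if (reg >= 0 && reg < (int)state.size()) state[reg] = interval;
}

int main() {
  std::vector<Interval*> state(8, nullptr);
  Interval a{1};
  state_put(state, 3, &a);                     // definition: interval 1 assigned to register 3
  bool has_error = check_state(state, 3, &a);  // use before any call: fine
  state_put(state, 3, nullptr);                // call site: caller-save register invalidated
  has_error |= check_state(state, 3, &a);      // use after the call: reported as an error
  return has_error ? 1 : 0;
}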


5579       return true;
5580     }
5581 
5582   }
5583 #endif
5584   return false;
5585 }
5586 
5587 void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
5588   BasicType type = cur->type();
5589   _num_phys_regs = LinearScan::num_physical_regs(type);
5590   _adjacent_regs = LinearScan::requires_adjacent_regs(type);
5591 
5592   if (pd_init_regs_for_alloc(cur)) {
5593     // the appropriate register range was selected.
5594   } else if (type == T_FLOAT || type == T_DOUBLE) {
5595     _first_reg = pd_first_fpu_reg;
5596     _last_reg = pd_last_fpu_reg;
5597   } else {
5598     _first_reg = pd_first_cpu_reg;
5599     _last_reg = FrameMap::last_cpu_reg();
5600   }
5601 
5602   assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
5603   assert(0 <= _last_reg && _last_reg < LinearScan::nof_regs, "out of range");
5604 }
5605 
5606 
5607 bool LinearScanWalker::is_move(LIR_Op* op, Interval* from, Interval* to) {
5608   if (op->code() != lir_move) {
5609     return false;
5610   }
5611   assert(op->as_Op1() != NULL, "move must be LIR_Op1");
5612 
5613   LIR_Opr in = ((LIR_Op1*)op)->in_opr();
5614   LIR_Opr res = ((LIR_Op1*)op)->result_opr();
5615   return in->is_virtual() && res->is_virtual() && in->vreg_number() == from->reg_num() && res->vreg_number() == to->reg_num();
5616 }
5617 
5618 // optimization (especially for phi functions of nested loops):
5619 // assign same spill slot to non-intersecting intervals
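
The comment above names the optimization without showing it. The standalone sketch below (hypothetical code, not the HotSpot implementation) illustrates the idea: when two intervals are connected by a move, as detected by an is_move-style check, they carry the same value, so they may share one spill slot as long as their live ranges never intersect.

struct SimpleInterval {
  int from;        // first operation id covered by the interval
  int to;          // one past the last operation id (half-open range)
  int spill_slot;  // -1 means no stack slot assigned yet
};

// true if the two live ranges overlap anywhere
static bool intersects(const SimpleInterval& a, const SimpleInterval& b) {
  return a.from < b.to && b.from < a.to;
}

// let the move target reuse the move source's stack slot when the ranges are disjoint
static void combine_spill_slots(const SimpleInterval& from_itv, SimpleInterval& to_itv) {
  if (from_itv.spill_slot >= 0 && to_itv.spill_slot < 0 && !intersects(from_itv, to_itv)) {
    to_itv.spill_slot = from_itv.spill_slot;
  }
}

This pays off mainly for the move chains created by phi functions of nested loops, as the comment notes: if both intervals end up spilled, they store the same value into the same slot, and the move between them becomes redundant.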

