< prev index next >

src/share/vm/c1/c1_LinearScan.cpp

Print this page
rev 10543 : imported patch c1_LIR
rev 10548 : imported patch some fixes
rev 10549 : imported patch c1_Instruction_BBA
rev 10551 : imported patch c1_LinearScan
rev 10554 : imported patch s/find_from_end/find/g


 478 // ********** Phase 1: number all instructions in all blocks
 479 // Compute depth-first and linear scan block orders, and number LIR_Op nodes for linear scan.
 480 
 481 void LinearScan::number_instructions() {
 482   {
 483     // dummy-timer to measure the cost of the timer itself
 484     // (this time is then subtracted from all other timers to get the real value)
 485     TIME_LINEAR_SCAN(timer_do_nothing);
 486   }
 487   TIME_LINEAR_SCAN(timer_number_instructions);
 488 
 489   // Assign IDs to LIR nodes and build a mapping, lir_ops, from ID to LIR_Op node.
 490   int num_blocks = block_count();
 491   int num_instructions = 0;
 492   int i;
 493   for (i = 0; i < num_blocks; i++) {
 494     num_instructions += block_at(i)->lir()->instructions_list()->length();
 495   }
 496 
 497   // initialize with correct length
 498   _lir_ops = LIR_OpArray(num_instructions);
 499   _block_of_op = BlockBeginArray(num_instructions);
 500 
 501   int op_id = 0;
 502   int idx = 0;
 503 
 504   for (i = 0; i < num_blocks; i++) {
 505     BlockBegin* block = block_at(i);
 506     block->set_first_lir_instruction_id(op_id);
 507     LIR_OpList* instructions = block->lir()->instructions_list();
 508 
 509     int num_inst = instructions->length();
 510     for (int j = 0; j < num_inst; j++) {
 511       LIR_Op* op = instructions->at(j);
 512       op->set_id(op_id);
 513 
 514       _lir_ops.at_put(idx, op);
 515       _block_of_op.at_put(idx, block);
 516       assert(lir_op_with_id(op_id) == op, "must match");
 517 
 518       idx++;
 519       op_id += 2; // numbering of lir_ops by two


2489       assert(info->_oop_map == oop_map, "same CodeEmitInfo used for multiple LIR instructions");
2490     }
2491   }
2492 }
2493 
2494 
2495 // frequently used constants
2496 // Allocate them with new so they are never destroyed (otherwise, a
2497 // forced exit could destroy these objects while they are still in
2498 // use).
2499 ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantOopWriteValue(NULL);
2500 ConstantIntValue*      LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(-1);
2501 ConstantIntValue*      LinearScan::_int_0_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(0);
2502 ConstantIntValue*      LinearScan::_int_1_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(1);
2503 ConstantIntValue*      LinearScan::_int_2_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(2);
2504 LocationValue*         _illegal_value = new (ResourceObj::C_HEAP, mtCompiler) LocationValue(Location());
2505 
// Reset the per-compilation cache of ScopeValue objects consulted while
// emitting debug information.
2506 void LinearScan::init_compute_debug_info() {
2507   // cache for frequently used scope values
2508   // (cpu registers and stack slots)
      // NOTE(review): one slot per cpu register, incoming argument and spill
      // slot, times two — presumably for the hi/lo words of two-word values;
      // TODO confirm against the cache's users.
2509   _scope_value_cache = ScopeValueArray((LinearScan::nof_cpu_regs + frame_map()->argcount() + max_spills()) * 2, NULL);

2510 }
2511 
// Build the debug-info MonitorValue for the given monitor slot: the location
// of the owner object plus the location of the lock in the current frame.
2512 MonitorValue* LinearScan::location_for_monitor_index(int monitor_index) {
2513   Location loc;
2514   if (!frame_map()->location_for_monitor_object(monitor_index, &loc)) {
        // bailout() only records the failure; control continues and the
        // default-constructed loc is used — presumably the result is
        // discarded once the compilation has bailed out (TODO confirm).
2515     bailout("too large frame");
2516   }
2517   ScopeValue* object_scope_value = new LocationValue(loc);
2518 
2519   if (!frame_map()->location_for_monitor_lock(monitor_index, &loc)) {
2520     bailout("too large frame");
2521   }
2522   return new MonitorValue(object_scope_value, loc);
2523 }
2524 
2525 LocationValue* LinearScan::location_for_name(int name, Location::Type loc_type) {
2526   Location loc;
2527   if (!frame_map()->locations_for_slot(name, loc_type, &loc)) {
2528     bailout("too large frame");
2529   }


3024           !dst->is_pointer() && !src->is_pointer() &&
3025           src->is_same_register(dst)) {
3026         instructions->at_put(j, NULL);
3027         has_dead = true;
3028       }
3029     }
3030   }
3031 
3032   if (has_dead) {
3033     // iterate all instructions of the block and remove all null-values.
3034     int insert_point = 0;
3035     for (int j = 0; j < num_inst; j++) {
3036       LIR_Op* op = instructions->at(j);
3037       if (op != NULL) {
3038         if (insert_point != j) {
3039           instructions->at_put(insert_point, op);
3040         }
3041         insert_point++;
3042       }
3043     }
3044     instructions->truncate(insert_point);
3045   }
3046 }
3047 
// Final rewrite phase: walk every block's LIR and replace virtual registers
// with the physical locations chosen by the allocator, computing debug info
// and oop maps (via the interval walker) along the way.
3048 void LinearScan::assign_reg_num() {
3049   TIME_LINEAR_SCAN(timer_assign_reg_num);
3050 
3051   init_compute_debug_info();
3052   IntervalWalker* iw = init_compute_oop_maps();
3053 
3054   int num_blocks = block_count();
3055   for (int i = 0; i < num_blocks; i++) {
3056     BlockBegin* block = block_at(i);
       // delegates per-instruction work to the LIR_OpList overload
3057     assign_reg_num(block->lir()->instructions_list(), iw);
3058   }
3059 }
3060 
3061 
3062 void LinearScan::do_linear_scan() {
3063   NOT_PRODUCT(_total_timer.begin_method());
3064 


3428 
3429   // accessors
3430   IntervalList* state_for_block(BlockBegin* block) { return _saved_states.at(block->block_id()); }
3431   void          set_state_for_block(BlockBegin* block, IntervalList* saved_state) { _saved_states.at_put(block->block_id(), saved_state); }
3432   void          add_to_work_list(BlockBegin* block) { if (!_work_list.contains(block)) _work_list.append(block); }
3433 
3434   // helper functions
3435   IntervalList* copy(IntervalList* input_state);
3436   void          state_put(IntervalList* input_state, int reg, Interval* interval);
3437   bool          check_state(IntervalList* input_state, int reg, Interval* interval);
3438 
3439   void process_block(BlockBegin* block);
3440   void process_xhandler(XHandler* xhandler, IntervalList* input_state);
3441   void process_successor(BlockBegin* block, IntervalList* input_state);
3442   void process_operations(LIR_List* ops, IntervalList* input_state);
3443 
3444  public:
3445   RegisterVerifier(LinearScan* allocator)
3446     : _allocator(allocator)
3447     , _work_list(16)
3448     , _saved_states(BlockBegin::number_of_blocks(), NULL)
3449   { }
3450 
3451   void verify(BlockBegin* start);
3452 };
3453 
3454 
3455 // entry function from LinearScan that starts the verification
3456 void LinearScan::verify_registers() {
      // debug-only data-flow check: verifies from the start block that every
      // register read sees the interval the allocator assigned to it
3457   RegisterVerifier verifier(this);
3458   verifier.verify(block_at(0));
3459 }
3460 
3461 
3462 void RegisterVerifier::verify(BlockBegin* start) {
3463   // setup input registers (method arguments) for first block
3464   int input_state_len = state_size();
3465   IntervalList* input_state = new IntervalList(input_state_len, input_state_len, NULL);
3466   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
3467   for (int n = 0; n < args->length(); n++) {
3468     LIR_Opr opr = args->at(n);


4434     assert(prev != NULL, "split before start of first range");
4435     result->_first = cur;
4436     prev->set_next(Range::end());
4437   }
4438   result->_current = result->_first;
4439   _cached_to = -1; // clear cached value
4440 
4441   // split list of use positions
4442   int total_len = _use_pos_and_kinds.length();
4443   int start_idx = total_len - 2;
4444   while (start_idx >= 0 && _use_pos_and_kinds.at(start_idx) < split_pos) {
4445     start_idx -= 2;
4446   }
4447 
4448   intStack new_use_pos_and_kinds(total_len - start_idx);
4449   int i;
4450   for (i = start_idx + 2; i < total_len; i++) {
4451     new_use_pos_and_kinds.append(_use_pos_and_kinds.at(i));
4452   }
4453 
4454   _use_pos_and_kinds.truncate(start_idx + 2);
4455   result->_use_pos_and_kinds = _use_pos_and_kinds;
4456   _use_pos_and_kinds = new_use_pos_and_kinds;
4457 
4458 #ifdef ASSERT
4459   assert(_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
4460   assert(result->_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
4461   assert(_use_pos_and_kinds.length() + result->_use_pos_and_kinds.length() == total_len, "missed some entries");
4462 
4463   for (i = 0; i < _use_pos_and_kinds.length(); i += 2) {
4464     assert(_use_pos_and_kinds.at(i) < split_pos, "must be");
4465     assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4466   }
4467   for (i = 0; i < result->_use_pos_and_kinds.length(); i += 2) {
4468     assert(result->_use_pos_and_kinds.at(i) >= split_pos, "must be");
4469     assert(result->_use_pos_and_kinds.at(i + 1) >= firstValidKind && result->_use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4470   }
4471 #endif
4472 
4473   return result;
4474 }


5522   if (_block_pos[max_reg] <= interval_to || _block_pos[max_reg + 1] <= interval_to) {
5523     *need_split = true;
5524   }
5525 
5526   return max_reg;
5527 }
5528 
// Spill every interval currently blocking the chosen register (pair):
// each collected interval is split and its child spilled so that the
// current interval can be placed in reg/regHi.
5529 void LinearScanWalker::split_and_spill_intersecting_intervals(int reg, int regHi) {
5530   assert(reg != any_reg, "no register assigned");
5531 
5532   for (int i = 0; i < _spill_intervals[reg]->length(); i++) {
5533     Interval* it = _spill_intervals[reg]->at(i);
5534     remove_from_list(it);
5535     split_and_spill_interval(it);
5536   }
5537 
5538   if (regHi != any_reg) {
        // an interval can appear in both spill lists; skip the ones already
        // handled for reg so they are not split/spilled twice
5539     IntervalList* processed = _spill_intervals[reg];
5540     for (int i = 0; i < _spill_intervals[regHi]->length(); i++) {
5541       Interval* it = _spill_intervals[regHi]->at(i);
5542       if (processed->find_from_end(it) == -1) {
5543         remove_from_list(it);
5544         split_and_spill_interval(it);
5545       }
5546     }
5547   }
5548 }
5549 
5550 
5551 // Split an Interval and spill it to memory so that cur can be placed in a register
5552 void LinearScanWalker::alloc_locked_reg(Interval* cur) {
5553   TRACE_LINEAR_SCAN(2, tty->print("need to split and spill to get register for "); cur->print());
5554 
5555   // collect current usage of registers
5556   init_use_lists(false);
5557   spill_exclude_active_fixed();
5558 //  spill_block_unhandled_fixed(cur);
5559   assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
5560   spill_block_inactive_fixed(cur);
5561   spill_collect_active_any();
5562   spill_collect_inactive_any(cur);


6193   int num_blocks = code->length();
6194 
6195   while (old_pos < num_blocks) {
6196     BlockBegin* block = code->at(old_pos);
6197 
6198     if (can_delete_block(block)) {
6199       BlockBegin* new_target = block->sux_at(0);
6200 
6201       // propagate backward branch target flag for correct code alignment
6202       if (block->is_set(BlockBegin::backward_branch_target_flag)) {
6203         new_target->set(BlockBegin::backward_branch_target_flag);
6204       }
6205 
6206       // collect a list with all predecessors that contains each predecessor only once
6207       // the predecessors of cur are changed during the substitution, so a copy of the
6208       // predecessor list is necessary
6209       int j;
6210       _original_preds.clear();
6211       for (j = block->number_of_preds() - 1; j >= 0; j--) {
6212         BlockBegin* pred = block->pred_at(j);
6213         if (_original_preds.index_of(pred) == -1) {
6214           _original_preds.append(pred);
6215         }
6216       }
6217 
6218       for (j = _original_preds.length() - 1; j >= 0; j--) {
6219         BlockBegin* pred = _original_preds.at(j);
6220         substitute_branch_target(pred, block, new_target);
6221         pred->substitute_sux(block, new_target);
6222       }
6223     } else {
6224       // adjust position of this block in the block list if blocks before
6225       // have been deleted
6226       if (new_pos != old_pos) {
6227         code->at_put(new_pos, code->at(old_pos));
6228       }
6229       new_pos++;
6230     }
6231     old_pos++;
6232   }
6233   code->truncate(new_pos);
6234 
6235   DEBUG_ONLY(verify(code));
6236 }
6237 
// Remove redundant branches at block ends: an unconditional branch to the
// textually following block is deleted outright; a conditional branch that
// only jumps over such an unconditional branch is negated (together with its
// compare and any dependent cmove) so the unconditional branch can go.
6238 void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
6239   // skip the last block because there a branch is always necessary
6240   for (int i = code->length() - 2; i >= 0; i--) {
6241     BlockBegin* block = code->at(i);
6242     LIR_OpList* instructions = block->lir()->instructions_list();
6243 
6244     LIR_Op* last_op = instructions->last();
6245     if (last_op->code() == lir_branch) {
6246       assert(last_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6247       LIR_OpBranch* last_branch = (LIR_OpBranch*)last_op;
6248 
6249       assert(last_branch->block() != NULL, "last branch must always have a block as target");
6250       assert(last_branch->label() == last_branch->block()->label(), "must be equal");
6251 
6252       if (last_branch->info() == NULL) {
6253         if (last_branch->block() == code->at(i + 1)) {
6254 
6255           TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));
6256 
6257           // delete last branch instruction
6258           instructions->truncate(instructions->length() - 1);
6259 
6260         } else {
            // NOTE(review): assumes the block holds at least two instructions
            // here — TODO confirm blocks always start with a label op
6261           LIR_Op* prev_op = instructions->at(instructions->length() - 2);
6262           if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
6263             assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6264             LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
6265 
6266             if (prev_branch->stub() == NULL) {
6267 
6268               LIR_Op2* prev_cmp = NULL;
6269               // There might be a cmove inserted for profiling which depends on the same
6270               // compare. If we change the condition of the respective compare, we have
6271               // to take care of this cmove as well.
6272               LIR_Op2* prev_cmove = NULL;
6273 
6274               for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
6275                 prev_op = instructions->at(j);
6276                 // check for the cmove
6277                 if (prev_op->code() == lir_cmove) {
6278                   assert(prev_op->as_Op2() != NULL, "cmove must be of type LIR_Op2");
6279                   prev_cmove = (LIR_Op2*)prev_op;
6280                   assert(prev_branch->cond() == prev_cmove->condition(), "should be the same");
6281                 }
6282                 if (prev_op->code() == lir_cmp) {
6283                   assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
6284                   prev_cmp = (LIR_Op2*)prev_op;
6285                   assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
6286                 }
6287               }
6288               assert(prev_cmp != NULL, "should have found comp instruction for branch");
6289               if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
6290 
6291                 TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
6292 
6293                 // eliminate a conditional branch to the immediate successor
6294                 prev_branch->change_block(last_branch->block());
6295                 prev_branch->negate_cond();
6296                 prev_cmp->set_condition(prev_branch->cond());
6297                 instructions->truncate(instructions->length() - 1);
6298                 // if we do change the condition, we have to change the cmove as well
6299                 if (prev_cmove != NULL) {
6300                   prev_cmove->set_condition(prev_branch->cond());
                    // swap the cmove inputs so its result stays correct
                    // under the negated condition
6301                   LIR_Opr t = prev_cmove->in_opr1();
6302                   prev_cmove->set_in_opr1(prev_cmove->in_opr2());
6303                   prev_cmove->set_in_opr2(t);
6304                 }
6305               }
6306             }
6307           }
6308         }
6309       }
6310     }
6311   }
6312 
6313   DEBUG_ONLY(verify(code));
6314 }
6315 
6316 void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
6317 #ifdef ASSERT


6360 #endif
6361           }
6362         }
6363       }
6364     }
6365   }
6366 }
6367 
6368 
6369 #ifdef ASSERT
// Debug-only consistency check after CFG transformations: every branch
// target, successor and predecessor of each block must still be present
// in the final code list.
6370 void ControlFlowOptimizer::verify(BlockList* code) {
6371   for (int i = 0; i < code->length(); i++) {
6372     BlockBegin* block = code->at(i);
6373     LIR_OpList* instructions = block->lir()->instructions_list();
6374 
6375     int j;
6376     for (j = 0; j < instructions->length(); j++) {
6377       LIR_OpBranch* op_branch = instructions->at(j)->as_OpBranch();
6378 
6379       if (op_branch != NULL) {
6380         assert(op_branch->block() == NULL || code->index_of(op_branch->block()) != -1, "branch target not valid");
6381         assert(op_branch->ublock() == NULL || code->index_of(op_branch->ublock()) != -1, "branch target not valid");
6382       }
6383     }
6384 
      // NOTE(review): the "- 1" bound skips the last successor and the last
      // predecessor of every block — looks like an off-by-one; confirm
      // whether the final entry is intentionally exempt
6385     for (j = 0; j < block->number_of_sux() - 1; j++) {
6386       BlockBegin* sux = block->sux_at(j);
6387       assert(code->index_of(sux) != -1, "successor not valid");
6388     }
6389 
6390     for (j = 0; j < block->number_of_preds() - 1; j++) {
6391       BlockBegin* pred = block->pred_at(j);
        // NOTE(review): message says "successor" but this checks predecessors
6392       assert(code->index_of(pred) != -1, "successor not valid");
6393     }
6394   }
6395 }
6396 #endif
6397 
6398 
6399 #ifndef PRODUCT
6400 
6401 // Implementation of LinearStatistic
6402 
6403 const char* LinearScanStatistic::counter_name(int counter_idx) {
6404   switch (counter_idx) {
6405     case counter_method:          return "compiled methods";
6406     case counter_fpu_method:      return "methods using fpu";
6407     case counter_loop_method:     return "methods with loops";
6408     case counter_exception_method:return "methods with xhandler";
6409 
6410     case counter_loop:            return "loops";
6411     case counter_block:           return "blocks";
6412     case counter_loop_block:      return "blocks inside loop";




 478 // ********** Phase 1: number all instructions in all blocks
 479 // Compute depth-first and linear scan block orders, and number LIR_Op nodes for linear scan.
 480 
 481 void LinearScan::number_instructions() {
 482   {
 483     // dummy-timer to measure the cost of the timer itself
 484     // (this time is then subtracted from all other timers to get the real value)
 485     TIME_LINEAR_SCAN(timer_do_nothing);
 486   }
 487   TIME_LINEAR_SCAN(timer_number_instructions);
 488 
 489   // Assign IDs to LIR nodes and build a mapping, lir_ops, from ID to LIR_Op node.
 490   int num_blocks = block_count();
 491   int num_instructions = 0;
 492   int i;
 493   for (i = 0; i < num_blocks; i++) {
 494     num_instructions += block_at(i)->lir()->instructions_list()->length();
 495   }
 496 
 497   // initialize with correct length
 498   _lir_ops = LIR_OpArray(num_instructions, num_instructions, NULL);
 499   _block_of_op = BlockBeginArray(num_instructions, num_instructions, NULL);
 500 
 501   int op_id = 0;
 502   int idx = 0;
 503 
 504   for (i = 0; i < num_blocks; i++) {
 505     BlockBegin* block = block_at(i);
 506     block->set_first_lir_instruction_id(op_id);
 507     LIR_OpList* instructions = block->lir()->instructions_list();
 508 
 509     int num_inst = instructions->length();
 510     for (int j = 0; j < num_inst; j++) {
 511       LIR_Op* op = instructions->at(j);
 512       op->set_id(op_id);
 513 
 514       _lir_ops.at_put(idx, op);
 515       _block_of_op.at_put(idx, block);
 516       assert(lir_op_with_id(op_id) == op, "must match");
 517 
 518       idx++;
 519       op_id += 2; // numbering of lir_ops by two


2489       assert(info->_oop_map == oop_map, "same CodeEmitInfo used for multiple LIR instructions");
2490     }
2491   }
2492 }
2493 
2494 
2495 // frequently used constants
2496 // Allocate them with new so they are never destroyed (otherwise, a
2497 // forced exit could destroy these objects while they are still in
2498 // use).
2499 ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantOopWriteValue(NULL);
2500 ConstantIntValue*      LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(-1);
2501 ConstantIntValue*      LinearScan::_int_0_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(0);
2502 ConstantIntValue*      LinearScan::_int_1_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(1);
2503 ConstantIntValue*      LinearScan::_int_2_scope_value =  new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(2);
2504 LocationValue*         _illegal_value = new (ResourceObj::C_HEAP, mtCompiler) LocationValue(Location());
2505 
// Reset the per-compilation cache of ScopeValue objects consulted while
// emitting debug information.
2506 void LinearScan::init_compute_debug_info() {
2507   // cache for frequently used scope values
2508   // (cpu registers and stack slots)
      // NOTE(review): one slot per cpu register, incoming argument and spill
      // slot, times two — presumably for the hi/lo words of two-word values;
      // TODO confirm against the cache's users.
2509   int cache_size = (LinearScan::nof_cpu_regs + frame_map()->argcount() + max_spills()) * 2;
2510   _scope_value_cache = ScopeValueArray(cache_size, cache_size, NULL);
2511 }
2512 
// Build the debug-info MonitorValue for the given monitor slot: the location
// of the owner object plus the location of the lock in the current frame.
2513 MonitorValue* LinearScan::location_for_monitor_index(int monitor_index) {
2514   Location loc;
2515   if (!frame_map()->location_for_monitor_object(monitor_index, &loc)) {
        // bailout() only records the failure; control continues and the
        // default-constructed loc is used — presumably the result is
        // discarded once the compilation has bailed out (TODO confirm).
2516     bailout("too large frame");
2517   }
2518   ScopeValue* object_scope_value = new LocationValue(loc);
2519 
2520   if (!frame_map()->location_for_monitor_lock(monitor_index, &loc)) {
2521     bailout("too large frame");
2522   }
2523   return new MonitorValue(object_scope_value, loc);
2524 }
2525 
2526 LocationValue* LinearScan::location_for_name(int name, Location::Type loc_type) {
2527   Location loc;
2528   if (!frame_map()->locations_for_slot(name, loc_type, &loc)) {
2529     bailout("too large frame");
2530   }


3025           !dst->is_pointer() && !src->is_pointer() &&
3026           src->is_same_register(dst)) {
3027         instructions->at_put(j, NULL);
3028         has_dead = true;
3029       }
3030     }
3031   }
3032 
3033   if (has_dead) {
3034     // iterate all instructions of the block and remove all null-values.
3035     int insert_point = 0;
3036     for (int j = 0; j < num_inst; j++) {
3037       LIR_Op* op = instructions->at(j);
3038       if (op != NULL) {
3039         if (insert_point != j) {
3040           instructions->at_put(insert_point, op);
3041         }
3042         insert_point++;
3043       }
3044     }
3045     instructions->trunc_to(insert_point);
3046   }
3047 }
3048 
// Final rewrite phase: walk every block's LIR and replace virtual registers
// with the physical locations chosen by the allocator, computing debug info
// and oop maps (via the interval walker) along the way.
3049 void LinearScan::assign_reg_num() {
3050   TIME_LINEAR_SCAN(timer_assign_reg_num);
3051 
3052   init_compute_debug_info();
3053   IntervalWalker* iw = init_compute_oop_maps();
3054 
3055   int num_blocks = block_count();
3056   for (int i = 0; i < num_blocks; i++) {
3057     BlockBegin* block = block_at(i);
       // delegates per-instruction work to the LIR_OpList overload
3058     assign_reg_num(block->lir()->instructions_list(), iw);
3059   }
3060 }
3061 
3062 
3063 void LinearScan::do_linear_scan() {
3064   NOT_PRODUCT(_total_timer.begin_method());
3065 


3429 
3430   // accessors
3431   IntervalList* state_for_block(BlockBegin* block) { return _saved_states.at(block->block_id()); }
3432   void          set_state_for_block(BlockBegin* block, IntervalList* saved_state) { _saved_states.at_put(block->block_id(), saved_state); }
3433   void          add_to_work_list(BlockBegin* block) { if (!_work_list.contains(block)) _work_list.append(block); }
3434 
3435   // helper functions
3436   IntervalList* copy(IntervalList* input_state);
3437   void          state_put(IntervalList* input_state, int reg, Interval* interval);
3438   bool          check_state(IntervalList* input_state, int reg, Interval* interval);
3439 
3440   void process_block(BlockBegin* block);
3441   void process_xhandler(XHandler* xhandler, IntervalList* input_state);
3442   void process_successor(BlockBegin* block, IntervalList* input_state);
3443   void process_operations(LIR_List* ops, IntervalList* input_state);
3444 
3445  public:
3446   RegisterVerifier(LinearScan* allocator)
3447     : _allocator(allocator)
3448     , _work_list(16)
3449     , _saved_states(BlockBegin::number_of_blocks(), BlockBegin::number_of_blocks(), NULL)
3450   { }
3451 
3452   void verify(BlockBegin* start);
3453 };
3454 
3455 
3456 // entry function from LinearScan that starts the verification
3457 void LinearScan::verify_registers() {
      // debug-only data-flow check: verifies from the start block that every
      // register read sees the interval the allocator assigned to it
3458   RegisterVerifier verifier(this);
3459   verifier.verify(block_at(0));
3460 }
3461 
3462 
3463 void RegisterVerifier::verify(BlockBegin* start) {
3464   // setup input registers (method arguments) for first block
3465   int input_state_len = state_size();
3466   IntervalList* input_state = new IntervalList(input_state_len, input_state_len, NULL);
3467   CallingConvention* args = compilation()->frame_map()->incoming_arguments();
3468   for (int n = 0; n < args->length(); n++) {
3469     LIR_Opr opr = args->at(n);


4435     assert(prev != NULL, "split before start of first range");
4436     result->_first = cur;
4437     prev->set_next(Range::end());
4438   }
4439   result->_current = result->_first;
4440   _cached_to = -1; // clear cached value
4441 
4442   // split list of use positions
4443   int total_len = _use_pos_and_kinds.length();
4444   int start_idx = total_len - 2;
4445   while (start_idx >= 0 && _use_pos_and_kinds.at(start_idx) < split_pos) {
4446     start_idx -= 2;
4447   }
4448 
4449   intStack new_use_pos_and_kinds(total_len - start_idx);
4450   int i;
4451   for (i = start_idx + 2; i < total_len; i++) {
4452     new_use_pos_and_kinds.append(_use_pos_and_kinds.at(i));
4453   }
4454 
4455   _use_pos_and_kinds.trunc_to(start_idx + 2);
4456   result->_use_pos_and_kinds = _use_pos_and_kinds;
4457   _use_pos_and_kinds = new_use_pos_and_kinds;
4458 
4459 #ifdef ASSERT
4460   assert(_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
4461   assert(result->_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
4462   assert(_use_pos_and_kinds.length() + result->_use_pos_and_kinds.length() == total_len, "missed some entries");
4463 
4464   for (i = 0; i < _use_pos_and_kinds.length(); i += 2) {
4465     assert(_use_pos_and_kinds.at(i) < split_pos, "must be");
4466     assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4467   }
4468   for (i = 0; i < result->_use_pos_and_kinds.length(); i += 2) {
4469     assert(result->_use_pos_and_kinds.at(i) >= split_pos, "must be");
4470     assert(result->_use_pos_and_kinds.at(i + 1) >= firstValidKind && result->_use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
4471   }
4472 #endif
4473 
4474   return result;
4475 }


5523   if (_block_pos[max_reg] <= interval_to || _block_pos[max_reg + 1] <= interval_to) {
5524     *need_split = true;
5525   }
5526 
5527   return max_reg;
5528 }
5529 
// Spill every interval currently blocking the chosen register (pair):
// each collected interval is split and its child spilled so that the
// current interval can be placed in reg/regHi.
5530 void LinearScanWalker::split_and_spill_intersecting_intervals(int reg, int regHi) {
5531   assert(reg != any_reg, "no register assigned");
5532 
5533   for (int i = 0; i < _spill_intervals[reg]->length(); i++) {
5534     Interval* it = _spill_intervals[reg]->at(i);
5535     remove_from_list(it);
5536     split_and_spill_interval(it);
5537   }
5538 
5539   if (regHi != any_reg) {
        // an interval can appear in both spill lists; skip the ones already
        // handled for reg so they are not split/spilled twice
5540     IntervalList* processed = _spill_intervals[reg];
5541     for (int i = 0; i < _spill_intervals[regHi]->length(); i++) {
5542       Interval* it = _spill_intervals[regHi]->at(i);
5543       if (processed->find(it) == -1) {
5544         remove_from_list(it);
5545         split_and_spill_interval(it);
5546       }
5547     }
5548   }
5549 }
5550 
5551 
5552 // Split an Interval and spill it to memory so that cur can be placed in a register
5553 void LinearScanWalker::alloc_locked_reg(Interval* cur) {
5554   TRACE_LINEAR_SCAN(2, tty->print("need to split and spill to get register for "); cur->print());
5555 
5556   // collect current usage of registers
5557   init_use_lists(false);
5558   spill_exclude_active_fixed();
5559 //  spill_block_unhandled_fixed(cur);
5560   assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
5561   spill_block_inactive_fixed(cur);
5562   spill_collect_active_any();
5563   spill_collect_inactive_any(cur);


6194   int num_blocks = code->length();
6195 
6196   while (old_pos < num_blocks) {
6197     BlockBegin* block = code->at(old_pos);
6198 
6199     if (can_delete_block(block)) {
6200       BlockBegin* new_target = block->sux_at(0);
6201 
6202       // propagate backward branch target flag for correct code alignment
6203       if (block->is_set(BlockBegin::backward_branch_target_flag)) {
6204         new_target->set(BlockBegin::backward_branch_target_flag);
6205       }
6206 
6207       // collect a list with all predecessors that contains each predecessor only once
6208       // the predecessors of cur are changed during the substitution, so a copy of the
6209       // predecessor list is necessary
6210       int j;
6211       _original_preds.clear();
6212       for (j = block->number_of_preds() - 1; j >= 0; j--) {
6213         BlockBegin* pred = block->pred_at(j);
6214         if (_original_preds.find(pred) == -1) {
6215           _original_preds.append(pred);
6216         }
6217       }
6218 
6219       for (j = _original_preds.length() - 1; j >= 0; j--) {
6220         BlockBegin* pred = _original_preds.at(j);
6221         substitute_branch_target(pred, block, new_target);
6222         pred->substitute_sux(block, new_target);
6223       }
6224     } else {
6225       // adjust position of this block in the block list if blocks before
6226       // have been deleted
6227       if (new_pos != old_pos) {
6228         code->at_put(new_pos, code->at(old_pos));
6229       }
6230       new_pos++;
6231     }
6232     old_pos++;
6233   }
6234   code->trunc_to(new_pos);
6235 
6236   DEBUG_ONLY(verify(code));
6237 }
6238 
// Remove redundant branches at block ends: an unconditional branch to the
// textually following block is deleted outright; a conditional branch that
// only jumps over such an unconditional branch is negated (together with its
// compare and any dependent cmove) so the unconditional branch can go.
6239 void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
6240   // skip the last block because there a branch is always necessary
6241   for (int i = code->length() - 2; i >= 0; i--) {
6242     BlockBegin* block = code->at(i);
6243     LIR_OpList* instructions = block->lir()->instructions_list();
6244 
6245     LIR_Op* last_op = instructions->last();
6246     if (last_op->code() == lir_branch) {
6247       assert(last_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6248       LIR_OpBranch* last_branch = (LIR_OpBranch*)last_op;
6249 
6250       assert(last_branch->block() != NULL, "last branch must always have a block as target");
6251       assert(last_branch->label() == last_branch->block()->label(), "must be equal");
6252 
6253       if (last_branch->info() == NULL) {
6254         if (last_branch->block() == code->at(i + 1)) {
6255 
6256           TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));
6257 
6258           // delete last branch instruction
6259           instructions->trunc_to(instructions->length() - 1);
6260 
6261         } else {
            // NOTE(review): assumes the block holds at least two instructions
            // here — TODO confirm blocks always start with a label op
6262           LIR_Op* prev_op = instructions->at(instructions->length() - 2);
6263           if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
6264             assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
6265             LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
6266 
6267             if (prev_branch->stub() == NULL) {
6268 
6269               LIR_Op2* prev_cmp = NULL;
6270               // There might be a cmove inserted for profiling which depends on the same
6271               // compare. If we change the condition of the respective compare, we have
6272               // to take care of this cmove as well.
6273               LIR_Op2* prev_cmove = NULL;
6274 
6275               for(int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
6276                 prev_op = instructions->at(j);
6277                 // check for the cmove
6278                 if (prev_op->code() == lir_cmove) {
6279                   assert(prev_op->as_Op2() != NULL, "cmove must be of type LIR_Op2");
6280                   prev_cmove = (LIR_Op2*)prev_op;
6281                   assert(prev_branch->cond() == prev_cmove->condition(), "should be the same");
6282                 }
6283                 if (prev_op->code() == lir_cmp) {
6284                   assert(prev_op->as_Op2() != NULL, "branch must be of type LIR_Op2");
6285                   prev_cmp = (LIR_Op2*)prev_op;
6286                   assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
6287                 }
6288               }
6289               assert(prev_cmp != NULL, "should have found comp instruction for branch");
6290               if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
6291 
6292                 TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
6293 
6294                 // eliminate a conditional branch to the immediate successor
6295                 prev_branch->change_block(last_branch->block());
6296                 prev_branch->negate_cond();
6297                 prev_cmp->set_condition(prev_branch->cond());
6298                 instructions->trunc_to(instructions->length() - 1);
6299                 // if we do change the condition, we have to change the cmove as well
6300                 if (prev_cmove != NULL) {
6301                   prev_cmove->set_condition(prev_branch->cond());
                    // swap the cmove inputs so its result stays correct
                    // under the negated condition
6302                   LIR_Opr t = prev_cmove->in_opr1();
6303                   prev_cmove->set_in_opr1(prev_cmove->in_opr2());
6304                   prev_cmove->set_in_opr2(t);
6305                 }
6306               }
6307             }
6308           }
6309         }
6310       }
6311     }
6312   }
6313 
6314   DEBUG_ONLY(verify(code));
6315 }
6316 
6317 void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
6318 #ifdef ASSERT


6361 #endif
6362           }
6363         }
6364       }
6365     }
6366   }
6367 }
6368 
6369 
6370 #ifdef ASSERT
// Debug-only consistency check after CFG transformations: every branch
// target, successor and predecessor of each block must still be present
// in the final code list.
6371 void ControlFlowOptimizer::verify(BlockList* code) {
6372   for (int i = 0; i < code->length(); i++) {
6373     BlockBegin* block = code->at(i);
6374     LIR_OpList* instructions = block->lir()->instructions_list();
6375 
6376     int j;
6377     for (j = 0; j < instructions->length(); j++) {
6378       LIR_OpBranch* op_branch = instructions->at(j)->as_OpBranch();
6379 
6380       if (op_branch != NULL) {
6381         assert(op_branch->block() == NULL || code->find(op_branch->block()) != -1, "branch target not valid");
6382         assert(op_branch->ublock() == NULL || code->find(op_branch->ublock()) != -1, "branch target not valid");
6383       }
6384     }
6385 
      // check all successors and predecessors: the previous "- 1" loop
      // bounds silently skipped the last entry of each list
6386     for (j = 0; j < block->number_of_sux(); j++) {
6387       BlockBegin* sux = block->sux_at(j);
6388       assert(code->find(sux) != -1, "successor not valid");
6389     }
6390 
6391     for (j = 0; j < block->number_of_preds(); j++) {
6392       BlockBegin* pred = block->pred_at(j);
        // message fixed: this loop checks predecessors, not successors
6393       assert(code->find(pred) != -1, "predecessor not valid");
6394     }
6395   }
6396 }
6397 #endif
6398 
6399 
6400 #ifndef PRODUCT
6401 
6402 // Implementation of LinearStatistic
6403 
6404 const char* LinearScanStatistic::counter_name(int counter_idx) {
6405   switch (counter_idx) {
6406     case counter_method:          return "compiled methods";
6407     case counter_fpu_method:      return "methods using fpu";
6408     case counter_loop_method:     return "methods with loops";
6409     case counter_exception_method:return "methods with xhandler";
6410 
6411     case counter_loop:            return "loops";
6412     case counter_block:           return "blocks";
6413     case counter_loop_block:      return "blocks inside loop";


< prev index next >