1 /*
2 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
25 #ifndef SHARE_VM_C1_C1_LINEARSCAN_HPP
26 #define SHARE_VM_C1_C1_LINEARSCAN_HPP
27
28 #include "c1/c1_FpuStackSim.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_IR.hpp"
31 #include "c1/c1_Instruction.hpp"
32 #include "c1/c1_LIR.hpp"
33 #include "c1/c1_LIRGenerator.hpp"
34
35 class DebugInfoCache;
36 class FpuStackAllocator;
37 class IRScopeDebugInfo;
38 class Interval;
39 class IntervalWalker;
40 class LIRGenerator;
41 class LinearScan;
42 class MoveResolver;
43 class Range;
44
45 define_array(IntervalArray, Interval*)
46 define_stack(IntervalList, IntervalArray)
47
48 define_array(IntervalsArray, IntervalList*)
49 define_stack(IntervalsList, IntervalsArray)
50
51 define_array(OopMapArray, OopMap*)
52 define_stack(OopMapList, OopMapArray)
53
54 define_array(ScopeValueArray, ScopeValue*)
55
56 define_array(LIR_OpListArray, LIR_OpList*)     // no trailing ';' — define_array expands to a complete class definition
57 define_stack(LIR_OpListStack, LIR_OpListArray) // (stray ';' at namespace scope is non-standard C++03 and inconsistent with the invocations above)
58
59
60 enum IntervalUseKind {
61 // priority of use kinds must be ascending
62 noUse = 0,
63 loopEndMarker = 1,
64 shouldHaveRegister = 2,
65 mustHaveRegister = 3,
66
67 firstValidKind = 1,
68 lastValidKind = 3
128 any_reg = -1,
129 nof_cpu_regs = pd_nof_cpu_regs_linearscan,
130 nof_fpu_regs = pd_nof_fpu_regs_linearscan,
131 nof_xmm_regs = pd_nof_xmm_regs_linearscan,
132 nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs
133 };
134
135 private:
136 Compilation* _compilation;
137 IR* _ir;
138 LIRGenerator* _gen;
139 FrameMap* _frame_map;
140
141 BlockList _cached_blocks; // cached list with all blocks in linear-scan order (only correct if original list keeps unchanged)
142 int _num_virtual_regs; // number of virtual registers (without new registers introduced because of splitting intervals)
143 bool _has_fpu_registers; // true if this method uses any floating point registers (and so fpu stack allocation is necessary)
144 int _num_calls; // total number of calls in this method
145 int _max_spills; // number of stack slots used for intervals allocated to memory
146 int _unused_spill_slot; // unused spill slot for a single-word value because of alignment of a double-word value
147
148 IntervalList _intervals; // mapping from register number to interval
149 IntervalList* _new_intervals_from_allocation; // list with all intervals created during allocation when an existing interval is split
150 IntervalArray* _sorted_intervals; // intervals sorted by Interval::from()
151 bool _needs_full_resort; // set to true if an Interval::from() is changed and _sorted_intervals must be resorted
152
153 LIR_OpArray _lir_ops; // mapping from LIR_Op id to LIR_Op node
154 BlockBeginArray _block_of_op; // mapping from LIR_Op id to the BlockBegin containing this instruction
155 BitMap _has_info; // bit set for each LIR_Op id that has a CodeEmitInfo
156 BitMap _has_call; // bit set for each LIR_Op id that destroys all caller save registers
157 BitMap2D _interval_in_loop; // bit set for each virtual register that is contained in each loop
158
159 // cached debug info to prevent multiple creation of same object
160 // TODO: cached scope values for registers could be static
161 ScopeValueArray _scope_value_cache;
162
163 static ConstantOopWriteValue* _oop_null_scope_value;
164 static ConstantIntValue* _int_m1_scope_value;
165 static ConstantIntValue* _int_0_scope_value;
166 static ConstantIntValue* _int_1_scope_value;
167 static ConstantIntValue* _int_2_scope_value;
168
169 // accessors
170 IR* ir() const { return _ir; }
183 int num_virtual_regs() const { return _num_virtual_regs; }
184 // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
185 int live_set_size() const { return round_to(_num_virtual_regs, BitsPerWord); }
186 bool has_fpu_registers() const { return _has_fpu_registers; }
187 int num_loops() const { return ir()->num_loops(); }
188 bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }
189
190 // handling of fpu stack allocation (platform dependent, needed for debug information generation)
191 #ifdef X86
192 FpuStackAllocator* _fpu_stack_allocator;
193 bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); }
194 #else
195 bool use_fpu_stack_allocation() const { return false; }
196 #endif
197
198
199 // access to interval list
200 int interval_count() const { return _intervals.length(); }
201 Interval* interval_at(int reg_num) const { return _intervals.at(reg_num); }
202
203 IntervalList* new_intervals_from_allocation() const { return _new_intervals_from_allocation; }
204
205 // access to LIR_Ops and Blocks indexed by op_id
206 int max_lir_op_id() const { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }
207 LIR_Op* lir_op_with_id(int op_id) const { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }
208 BlockBegin* block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }
209
210 bool is_block_begin(int op_id) { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }
211 bool covers_block_begin(int op_id_1, int op_id_2) { return block_of_op_with_id(op_id_1) != block_of_op_with_id(op_id_2); }
212
213 bool has_call(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }
214 bool has_info(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }
215
216
217 // functions for converting LIR-Operands to register numbers
218 static bool is_valid_reg_num(int reg_num) { return reg_num >= 0; }
219 static int reg_num(LIR_Opr opr);
220 static int reg_numHi(LIR_Opr opr);
221
222 // functions for classification of intervals
223 static bool is_precolored_interval(const Interval* i);
284 void add_use (int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type);
285 void add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type);
286
287 // Add platform dependent kills for particular LIR ops. Can be used
288 // to add platform dependent behaviour for some operations.
289 void pd_add_temps(LIR_Op* op);
290
291 IntervalUseKind use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr);
292 IntervalUseKind use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr);
293 void handle_method_arguments(LIR_Op* op);
294 void handle_doubleword_moves(LIR_Op* op);
295 void add_register_hints(LIR_Op* op);
296
297 void build_intervals();
298
299
300 // Phase 5: actual register allocation
301 // (Uses LinearScanWalker)
302 //
303 // helper functions for building a sorted list of intervals
304 NOT_PRODUCT(bool is_sorted(IntervalArray* intervals);)
305 static int interval_cmp(Interval** a, Interval** b);
306 void add_to_list(Interval** first, Interval** prev, Interval* interval);
307 void create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i));
308
309 void sort_intervals_before_allocation();
310 void sort_intervals_after_allocation();
311 void allocate_registers();
312
313
314 // Phase 6: resolve data flow
315 // (insert moves at edges between blocks if intervals have been split)
316 //
317 // helper functions for resolve_data_flow()
318 Interval* split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode);
319 Interval* interval_at_block_begin(BlockBegin* block, int reg_num);
320 Interval* interval_at_block_end(BlockBegin* block, int reg_num);
321 Interval* interval_at_op_id(int reg_num, int op_id);
322 void resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
323 void resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
324 void resolve_data_flow();
410 static void print_timers(double total);
411 #endif
412 };
413
414
415 // Helper class for ordering moves that are inserted at the same position in the LIR
416 // When moves between registers are inserted, it is important that the moves are
417 // ordered such that no register is overwritten. So moves from register to stack
418 // are processed prior to moves from stack to register. When moves have circular
419 // dependencies, a temporary stack slot is used to break the circle.
420 // The same logic is used in the LinearScanWalker and in LinearScan during resolve_data_flow
421 // and therefore factored out in a separate class
422 class MoveResolver: public StackObj {
423 private:
424 LinearScan* _allocator;
425
426 LIR_List* _insert_list;
427 int _insert_idx;
428 LIR_InsertionBuffer _insertion_buffer; // buffer where moves are inserted
429
430 IntervalList _mapping_from;
431 LIR_OprList _mapping_from_opr;
432 IntervalList _mapping_to;
433 bool _multiple_reads_allowed;
434 int _register_blocked[LinearScan::nof_regs];
435
436 int register_blocked(int reg) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }
437 void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "out of bounds"); _register_blocked[reg] += direction; }
438
439 void block_registers(Interval* it);
440 void unblock_registers(Interval* it);
441 bool save_to_process_move(Interval* from, Interval* to); // NOTE(review): "save" looks like a typo for "safe" — renaming would touch the .cpp definition, so left as-is
442
443 void create_insertion_buffer(LIR_List* list);
444 void append_insertion_buffer();
445 void insert_move(Interval* from_interval, Interval* to_interval);
446 void insert_move(LIR_Opr from_opr, Interval* to_interval);
447
448 DEBUG_ONLY(void verify_before_resolve();)
449 void resolve_mappings();
450 public:
451 MoveResolver(LinearScan* allocator);
452
516 static Interval* _end; // sentinel (interval with only range Range::end())
517
518 int _reg_num;
519 BasicType _type; // valid only for virtual registers
520 Range* _first; // sorted list of Ranges
521 intStack _use_pos_and_kinds; // sorted list of use-positions and their corresponding use-kinds
522
523 Range* _current; // interval iteration: the current Range
524 Interval* _next; // interval iteration: sorted list of Intervals (ends with sentinel)
525 IntervalState _state; // interval iteration: to which set belongs this interval
526
527
528 int _assigned_reg;
529 int _assigned_regHi;
530
531 int _cached_to; // cached value: to of last range (-1: not cached)
532 LIR_Opr _cached_opr;
533 VMReg _cached_vm_reg;
534
535 Interval* _split_parent; // the original interval where this interval is derived from
536 IntervalList _split_children; // list of all intervals that are split off from this interval (only available for split parents)
537 Interval* _current_split_child; // the current split child that has been active or inactive last (always stored in split parents)
538
539 int _canonical_spill_slot; // the stack slot where all split parts of this interval are spilled to (always stored in split parents)
540 bool _insert_move_when_activated; // true if move is inserted between _current_split_child and this interval when interval gets active the first time
541 IntervalSpillState _spill_state; // for spill move optimization
542 int _spill_definition_pos; // position where the interval is defined (if defined only once)
543 Interval* _register_hint; // this interval should be in the same register as the hint interval
544
545 int calc_to();
546 Interval* new_split_child();
547 public:
548 Interval(int reg_num);
549
550 static void initialize(Arena* arena);
551 static Interval* end() { return _end; }
552
553 // accessors
554 int reg_num() const { return _reg_num; }
555 void set_reg_num(int r) { assert(_reg_num == -1, "cannot change reg_num"); _reg_num = r; }
556 BasicType type() const { assert(_reg_num == -1 || _reg_num >= LIR_OprDesc::vreg_base, "cannot access type for fixed interval"); return _type; }
705 void walk() { walk_to(max_jint); }
706
707 int current_position() { return _current_position; }
708 };
709
710
711 // The actual linear scan register allocator
712 class LinearScanWalker : public IntervalWalker {
713 enum {
714 any_reg = LinearScan::any_reg
715 };
716
717 private:
718 int _first_reg; // the reg. number of the first phys. register
719 int _last_reg; // the reg. number of the last phys. register
720 int _num_phys_regs; // required by current interval
721 bool _adjacent_regs; // have lo/hi words of phys. regs be adjacent
722
723 int _use_pos[LinearScan::nof_regs];
724 int _block_pos[LinearScan::nof_regs];
725 IntervalList* _spill_intervals[LinearScan::nof_regs];
726
727 MoveResolver _move_resolver; // for ordering spill moves
728
729 // accessors mapped to same functions in class LinearScan
730 int block_count() const { return allocator()->block_count(); }
731 BlockBegin* block_at(int idx) const { return allocator()->block_at(idx); }
732 BlockBegin* block_of_op_with_id(int op_id) const { return allocator()->block_of_op_with_id(op_id); }
733
734 void init_use_lists(bool only_process_use_pos);
735 void exclude_from_use(int reg);
736 void exclude_from_use(Interval* i);
737 void set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos);
738 void set_use_pos(Interval* i, int use_pos, bool only_process_use_pos);
739 void set_block_pos(int reg, Interval* i, int block_pos);
740 void set_block_pos(Interval* i, int block_pos);
741
742 void free_exclude_active_fixed();
743 void free_exclude_active_any();
744 void free_collect_inactive_fixed(Interval* cur);
745 void free_collect_inactive_any(Interval* cur);
|
1 /*
2 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
25 #ifndef SHARE_VM_C1_C1_LINEARSCAN_HPP
26 #define SHARE_VM_C1_C1_LINEARSCAN_HPP
27
28 #include "c1/c1_FpuStackSim.hpp"
29 #include "c1/c1_FrameMap.hpp"
30 #include "c1/c1_IR.hpp"
31 #include "c1/c1_Instruction.hpp"
32 #include "c1/c1_LIR.hpp"
33 #include "c1/c1_LIRGenerator.hpp"
34
35 class DebugInfoCache;
36 class FpuStackAllocator;
37 class IRScopeDebugInfo;
38 class Interval;
39 class IntervalWalker;
40 class LIRGenerator;
41 class LinearScan;
42 class MoveResolver;
43 class Range;
44
45 define_array(IntervalsArray, GrowableArray<Interval*>*)
46 define_stack(IntervalsList, IntervalsArray)
47
48 define_array(OopMapArray, OopMap*)
49 define_stack(OopMapList, OopMapArray)
50
51 define_array(ScopeValueArray, ScopeValue*)
52
53 define_array(LIR_OpListArray, LIR_OpList*)     // no trailing ';' — define_array expands to a complete class definition
54 define_stack(LIR_OpListStack, LIR_OpListArray) // (stray ';' at namespace scope is non-standard C++03 and inconsistent with the invocations above)
55
56
57 enum IntervalUseKind {
58 // priority of use kinds must be ascending
59 noUse = 0,
60 loopEndMarker = 1,
61 shouldHaveRegister = 2,
62 mustHaveRegister = 3,
63
64 firstValidKind = 1,
65 lastValidKind = 3
125 any_reg = -1,
126 nof_cpu_regs = pd_nof_cpu_regs_linearscan,
127 nof_fpu_regs = pd_nof_fpu_regs_linearscan,
128 nof_xmm_regs = pd_nof_xmm_regs_linearscan,
129 nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs
130 };
131
132 private:
133 Compilation* _compilation;
134 IR* _ir;
135 LIRGenerator* _gen;
136 FrameMap* _frame_map;
137
138 BlockList _cached_blocks; // cached list with all blocks in linear-scan order (only correct if original list keeps unchanged)
139 int _num_virtual_regs; // number of virtual registers (without new registers introduced because of splitting intervals)
140 bool _has_fpu_registers; // true if this method uses any floating point registers (and so fpu stack allocation is necessary)
141 int _num_calls; // total number of calls in this method
142 int _max_spills; // number of stack slots used for intervals allocated to memory
143 int _unused_spill_slot; // unused spill slot for a single-word value because of alignment of a double-word value
144
145 GrowableArray<Interval*> _intervals; // mapping from register number to interval
146 GrowableArray<Interval*>* _new_intervals_from_allocation; // list with all intervals created during allocation when an existing interval is split
147 GrowableArray<Interval*>* _sorted_intervals; // intervals sorted by Interval::from()
148 bool _needs_full_resort; // set to true if an Interval::from() is changed and _sorted_intervals must be resorted
149
150 LIR_OpArray _lir_ops; // mapping from LIR_Op id to LIR_Op node
151 BlockBeginArray _block_of_op; // mapping from LIR_Op id to the BlockBegin containing this instruction
152 BitMap _has_info; // bit set for each LIR_Op id that has a CodeEmitInfo
153 BitMap _has_call; // bit set for each LIR_Op id that destroys all caller save registers
154 BitMap2D _interval_in_loop; // bit set for each virtual register that is contained in each loop
155
156 // cached debug info to prevent multiple creation of same object
157 // TODO: cached scope values for registers could be static
158 ScopeValueArray _scope_value_cache;
159
160 static ConstantOopWriteValue* _oop_null_scope_value;
161 static ConstantIntValue* _int_m1_scope_value;
162 static ConstantIntValue* _int_0_scope_value;
163 static ConstantIntValue* _int_1_scope_value;
164 static ConstantIntValue* _int_2_scope_value;
165
166 // accessors
167 IR* ir() const { return _ir; }
180 int num_virtual_regs() const { return _num_virtual_regs; }
181 // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
182 int live_set_size() const { return round_to(_num_virtual_regs, BitsPerWord); }
183 bool has_fpu_registers() const { return _has_fpu_registers; }
184 int num_loops() const { return ir()->num_loops(); }
185 bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }
186
187 // handling of fpu stack allocation (platform dependent, needed for debug information generation)
188 #ifdef X86
189 FpuStackAllocator* _fpu_stack_allocator;
190 bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); }
191 #else
192 bool use_fpu_stack_allocation() const { return false; }
193 #endif
194
195
196 // access to interval list
197 int interval_count() const { return _intervals.length(); }
198 Interval* interval_at(int reg_num) const { return _intervals.at(reg_num); }
199
200 GrowableArray<Interval*>* new_intervals_from_allocation() const { return _new_intervals_from_allocation; }
201
202 // access to LIR_Ops and Blocks indexed by op_id
203 int max_lir_op_id() const { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }
204 LIR_Op* lir_op_with_id(int op_id) const { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }
205 BlockBegin* block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }
206
207 bool is_block_begin(int op_id) { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }
208 bool covers_block_begin(int op_id_1, int op_id_2) { return block_of_op_with_id(op_id_1) != block_of_op_with_id(op_id_2); }
209
210 bool has_call(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }
211 bool has_info(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }
212
213
214 // functions for converting LIR-Operands to register numbers
215 static bool is_valid_reg_num(int reg_num) { return reg_num >= 0; }
216 static int reg_num(LIR_Opr opr);
217 static int reg_numHi(LIR_Opr opr);
218
219 // functions for classification of intervals
220 static bool is_precolored_interval(const Interval* i);
281 void add_use (int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type);
282 void add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type);
283
284 // Add platform dependent kills for particular LIR ops. Can be used
285 // to add platform dependent behaviour for some operations.
286 void pd_add_temps(LIR_Op* op);
287
288 IntervalUseKind use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr);
289 IntervalUseKind use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr);
290 void handle_method_arguments(LIR_Op* op);
291 void handle_doubleword_moves(LIR_Op* op);
292 void add_register_hints(LIR_Op* op);
293
294 void build_intervals();
295
296
297 // Phase 5: actual register allocation
298 // (Uses LinearScanWalker)
299 //
300 // helper functions for building a sorted list of intervals
301 NOT_PRODUCT(bool is_sorted(GrowableArray<Interval*>* intervals);)
302 static int interval_cmp(Interval** a, Interval** b);
303 void add_to_list(Interval** first, Interval** prev, Interval* interval);
304 void create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i));
305
306 void sort_intervals_before_allocation();
307 void sort_intervals_after_allocation();
308 void allocate_registers();
309
310
311 // Phase 6: resolve data flow
312 // (insert moves at edges between blocks if intervals have been split)
313 //
314 // helper functions for resolve_data_flow()
315 Interval* split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode);
316 Interval* interval_at_block_begin(BlockBegin* block, int reg_num);
317 Interval* interval_at_block_end(BlockBegin* block, int reg_num);
318 Interval* interval_at_op_id(int reg_num, int op_id);
319 void resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
320 void resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
321 void resolve_data_flow();
407 static void print_timers(double total);
408 #endif
409 };
410
411
412 // Helper class for ordering moves that are inserted at the same position in the LIR
413 // When moves between registers are inserted, it is important that the moves are
414 // ordered such that no register is overwritten. So moves from register to stack
415 // are processed prior to moves from stack to register. When moves have circular
416 // dependencies, a temporary stack slot is used to break the circle.
417 // The same logic is used in the LinearScanWalker and in LinearScan during resolve_data_flow
418 // and therefore factored out in a separate class
419 class MoveResolver: public StackObj {
420 private:
421 LinearScan* _allocator;
422
423 LIR_List* _insert_list;
424 int _insert_idx;
425 LIR_InsertionBuffer _insertion_buffer; // buffer where moves are inserted
426
427 GrowableArray<Interval*> _mapping_from;
428 LIR_OprList _mapping_from_opr;
429 GrowableArray<Interval*> _mapping_to;
430 bool _multiple_reads_allowed;
431 int _register_blocked[LinearScan::nof_regs];
432
433 int register_blocked(int reg) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }
434 void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "out of bounds"); _register_blocked[reg] += direction; }
435
436 void block_registers(Interval* it);
437 void unblock_registers(Interval* it);
438 bool save_to_process_move(Interval* from, Interval* to); // NOTE(review): "save" looks like a typo for "safe" — renaming would touch the .cpp definition, so left as-is
439
440 void create_insertion_buffer(LIR_List* list);
441 void append_insertion_buffer();
442 void insert_move(Interval* from_interval, Interval* to_interval);
443 void insert_move(LIR_Opr from_opr, Interval* to_interval);
444
445 DEBUG_ONLY(void verify_before_resolve();)
446 void resolve_mappings();
447 public:
448 MoveResolver(LinearScan* allocator);
449
513 static Interval* _end; // sentinel (interval with only range Range::end())
514
515 int _reg_num;
516 BasicType _type; // valid only for virtual registers
517 Range* _first; // sorted list of Ranges
518 intStack _use_pos_and_kinds; // sorted list of use-positions and their corresponding use-kinds
519
520 Range* _current; // interval iteration: the current Range
521 Interval* _next; // interval iteration: sorted list of Intervals (ends with sentinel)
522 IntervalState _state; // interval iteration: to which set belongs this interval
523
524
525 int _assigned_reg;
526 int _assigned_regHi;
527
528 int _cached_to; // cached value: to of last range (-1: not cached)
529 LIR_Opr _cached_opr;
530 VMReg _cached_vm_reg;
531
532 Interval* _split_parent; // the original interval where this interval is derived from
533 GrowableArray<Interval*> _split_children; // list of all intervals that are split off from this interval (only available for split parents)
534 Interval* _current_split_child; // the current split child that has been active or inactive last (always stored in split parents)
535
536 int _canonical_spill_slot; // the stack slot where all split parts of this interval are spilled to (always stored in split parents)
537 bool _insert_move_when_activated; // true if move is inserted between _current_split_child and this interval when interval gets active the first time
538 IntervalSpillState _spill_state; // for spill move optimization
539 int _spill_definition_pos; // position where the interval is defined (if defined only once)
540 Interval* _register_hint; // this interval should be in the same register as the hint interval
541
542 int calc_to();
543 Interval* new_split_child();
544 public:
545 Interval(int reg_num);
546
547 static void initialize(Arena* arena);
548 static Interval* end() { return _end; }
549
550 // accessors
551 int reg_num() const { return _reg_num; }
552 void set_reg_num(int r) { assert(_reg_num == -1, "cannot change reg_num"); _reg_num = r; }
553 BasicType type() const { assert(_reg_num == -1 || _reg_num >= LIR_OprDesc::vreg_base, "cannot access type for fixed interval"); return _type; }
702 void walk() { walk_to(max_jint); }
703
704 int current_position() { return _current_position; }
705 };
706
707
708 // The actual linear scan register allocator
709 class LinearScanWalker : public IntervalWalker {
710 enum {
711 any_reg = LinearScan::any_reg
712 };
713
714 private:
715 int _first_reg; // the reg. number of the first phys. register
716 int _last_reg; // the reg. number of the last phys. register
717 int _num_phys_regs; // required by current interval
718 bool _adjacent_regs; // have lo/hi words of phys. regs be adjacent
719
720 int _use_pos[LinearScan::nof_regs];
721 int _block_pos[LinearScan::nof_regs];
722 GrowableArray<Interval*>* _spill_intervals[LinearScan::nof_regs];
723
724 MoveResolver _move_resolver; // for ordering spill moves
725
726 // accessors mapped to same functions in class LinearScan
727 int block_count() const { return allocator()->block_count(); }
728 BlockBegin* block_at(int idx) const { return allocator()->block_at(idx); }
729 BlockBegin* block_of_op_with_id(int op_id) const { return allocator()->block_of_op_with_id(op_id); }
730
731 void init_use_lists(bool only_process_use_pos);
732 void exclude_from_use(int reg);
733 void exclude_from_use(Interval* i);
734 void set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos);
735 void set_use_pos(Interval* i, int use_pos, bool only_process_use_pos);
736 void set_block_pos(int reg, Interval* i, int block_pos);
737 void set_block_pos(Interval* i, int block_pos);
738
739 void free_exclude_active_fixed();
740 void free_exclude_active_any();
741 void free_collect_inactive_fixed(Interval* cur);
742 void free_collect_inactive_any(Interval* cur);
|