  Compilation* compilation() const { return _compilation; }
  LIRGenerator* gen() const { return _gen; }
  FrameMap* frame_map() const { return _frame_map; }

  // unified bailout support
  void bailout(const char* msg) const { compilation()->bailout(msg); }
  bool bailed_out() const { return compilation()->bailed_out(); }

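  // Usage sketch (illustrative only; do_some_phase() is a placeholder, not a
  // real method): callers are expected to stop as soon as the compilation has
  // bailed out, e.g.
  //   do_some_phase();
  //   if (bailed_out()) return;
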
  // access to block list (sorted in linear scan order)
  int block_count() const { assert(_cached_blocks.length() == ir()->linear_scan_order()->length(), "invalid cached block list"); return _cached_blocks.length(); }
  BlockBegin* block_at(int idx) const { assert(_cached_blocks.at(idx) == ir()->linear_scan_order()->at(idx), "invalid cached block list"); return _cached_blocks.at(idx); }
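  // Usage sketch (illustrative only): passes walk the blocks in linear scan
  // order through these cached accessors, e.g.
  //   for (int i = 0; i < block_count(); i++) {
  //     BlockBegin* block = block_at(i);
  //     // process block
  //   }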

  int num_virtual_regs() const { return _num_virtual_regs; }
  // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
  int live_set_size() const { return align_up(_num_virtual_regs, BitsPerWord); }
  bool has_fpu_registers() const { return _has_fpu_registers; }
  int num_loops() const { return ir()->num_loops(); }
  bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }

  // handling of fpu stack allocation (platform dependent, needed for debug information generation)
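  // Note: with UseSSE < 2 on 32-bit x86, floating-point values live on the x87
  // register stack, so the exact stack layout must be known wherever debug
  // information is emitted; the allocator below computes that layout.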
#ifdef IA32
  FpuStackAllocator* _fpu_stack_allocator;
  bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); }
#else
  bool use_fpu_stack_allocation() const { return false; }
#endif


  // access to interval list
  int interval_count() const { return _intervals.length(); }
  Interval* interval_at(int reg_num) const { return _intervals.at(reg_num); }
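  // Note (assumption about how the list is built): intervals are indexed
  // directly by register number, so entries for register numbers that never
  // got an interval may be NULL, and callers iterating over interval_count()
  // entries should check for that.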

  // access to LIR_Ops and Blocks indexed by op_id
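  // Note: every LIR_Op carries an even op_id (its index in _lir_ops times
  // two); odd op_ids name the position between two operations, e.g. for spill
  // moves, which is why the accessors below assert evenness and map an op_id
  // to a list index with op_id >> 1.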
  int max_lir_op_id() const { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }
  LIR_Op* lir_op_with_id(int op_id) const { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }
  BlockBegin* block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }

  bool is_block_begin(int op_id) { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }

  bool has_call(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }
  bool has_info(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }
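  // Usage sketch (illustrative only): visiting every operation via its op_id.
  //   for (int op_id = 0; op_id <= max_lir_op_id(); op_id += 2) {
  //     LIR_Op* op = lir_op_with_id(op_id);
  //     if (has_call(op_id)) {
  //       // caller-saved registers are destroyed at this position
  //     }
  //   }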