7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #ifndef SHARE_VM_C1_C1_LIRGENERATOR_HPP
25 #define SHARE_VM_C1_C1_LIRGENERATOR_HPP
26
27 #include "c1/c1_Instruction.hpp"
28 #include "c1/c1_LIR.hpp"
29 #include "ci/ciMethodData.hpp"
30 #include "utilities/macros.hpp"
31 #include "utilities/sizes.hpp"
32
33 // The classes responsible for code emission and register allocation
34
35
36 class LIRGenerator;
37 class LIREmitter;
38 class Invoke;
39 class SwitchRange;
40 class LIRItem;
41
42 typedef GrowableArray<LIRItem*> LIRItemList;
43
44 class SwitchRange: public CompilationResourceObj {
45 private:
|
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #ifndef SHARE_VM_C1_C1_LIRGENERATOR_HPP
25 #define SHARE_VM_C1_C1_LIRGENERATOR_HPP
26
27 #include "gc/shared/c1/barrierSetC1.hpp"
28 #include "c1/c1_Decorators.hpp"
29 #include "c1/c1_Instruction.hpp"
30 #include "c1/c1_LIR.hpp"
31 #include "ci/ciMethodData.hpp"
32 #include "utilities/macros.hpp"
33 #include "utilities/sizes.hpp"
34
35 // The classes responsible for code emission and register allocation
36
37
38 class LIRGenerator;
39 class LIREmitter;
40 class Invoke;
41 class SwitchRange;
42 class LIRItem;
43
44 typedef GrowableArray<LIRItem*> LIRItemList;
45
46 class SwitchRange: public CompilationResourceObj {
47 private:
|
132
133 void emit_move(LIR_Opr src, LIR_Opr dest);
134 void move_to_temp(LIR_Opr src);
135 void move_temp_to(LIR_Opr dest);
136 void move(ResolveNode* src, ResolveNode* dest);
137
138 LIRGenerator* gen() {
139 return _gen;
140 }
141
142 public:
143 PhiResolver(LIRGenerator* _lir_gen, int max_vregs);
144 ~PhiResolver();
145
146 void move(LIR_Opr src, LIR_Opr dest);
147 };
148
149
150 // only the classes below belong in the same file
151 class LIRGenerator: public InstructionVisitor, public BlockClosure {
152 // LIRGenerator should never get instantiated on the heap.
153 private:
154 void* operator new(size_t size) throw();
155 void* operator new[](size_t size) throw();
156 void operator delete(void* p) { ShouldNotReachHere(); }
157 void operator delete[](void* p) { ShouldNotReachHere(); }
158
159 Compilation* _compilation;
160 ciMethod* _method; // method that we are compiling
161 PhiResolverState _resolver_state;
162 BlockBegin* _block;
163 int _virtual_register_number;
164 Values _instruction_for_operand;
165 BitMap2D _vreg_flags; // flags which can be set on a per-vreg basis
166 LIR_List* _lir;
167 BarrierSet* _bs;
168
169 LIRGenerator* gen() {
170 return this;
171 }
172
173 void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN;
174
175 #ifdef ASSERT
176 LIR_List* lir(const char * file, int line) const {
177 _lir->set_file_and_line(file, line);
178 return _lir;
179 }
180 #endif
181 LIR_List* lir() const {
182 return _lir;
183 }
184
185 // a simple cache of constants used within a block
186 GrowableArray<LIR_Const*> _constants;
|
134
135 void emit_move(LIR_Opr src, LIR_Opr dest);
136 void move_to_temp(LIR_Opr src);
137 void move_temp_to(LIR_Opr dest);
138 void move(ResolveNode* src, ResolveNode* dest);
139
140 LIRGenerator* gen() {
141 return _gen;
142 }
143
144 public:
145 PhiResolver(LIRGenerator* _lir_gen, int max_vregs);
146 ~PhiResolver();
147
148 void move(LIR_Opr src, LIR_Opr dest);
149 };
150
151
152 // only the classes below belong in the same file
153 class LIRGenerator: public InstructionVisitor, public BlockClosure {
154 friend class BarrierSetC1;
155 friend class ModRefBarrierSetC1;
156 friend class CardTableBarrierSetC1;
157 friend class G1BarrierSetC1;
158 // LIRGenerator should never get instantiated on the heap.
159 private:
160 void* operator new(size_t size) throw();
161 void* operator new[](size_t size) throw();
162 void operator delete(void* p) { ShouldNotReachHere(); }
163 void operator delete[](void* p) { ShouldNotReachHere(); }
164
165 Compilation* _compilation;
166 ciMethod* _method; // method that we are compiling
167 PhiResolverState _resolver_state;
168 BlockBegin* _block;
169 int _virtual_register_number;
170 Values _instruction_for_operand;
171 BitMap2D _vreg_flags; // flags which can be set on a per-vreg basis
172 LIR_List* _lir;
173
174 LIRGenerator* gen() {
175 return this;
176 }
177
178 void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN;
179
180 #ifdef ASSERT
181 LIR_List* lir(const char * file, int line) const {
182 _lir->set_file_and_line(file, line);
183 return _lir;
184 }
185 #endif
186 LIR_List* lir() const {
187 return _lir;
188 }
189
190 // a simple cache of constants used within a block
191 GrowableArray<LIR_Const*> _constants;
|
251 void do_ArrayCopy(Intrinsic* x);
252 void do_CompareAndSwap(Intrinsic* x, ValueType* type);
253 void do_NIOCheckIndex(Intrinsic* x);
254 void do_FPIntrinsics(Intrinsic* x);
255 void do_Reference_get(Intrinsic* x);
256 void do_update_CRC32(Intrinsic* x);
257 void do_update_CRC32C(Intrinsic* x);
258 void do_vectorizedMismatch(Intrinsic* x);
259
260 LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
261 LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
262
263 // convenience functions
264 LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
265 LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
266
267 // GC Barriers
268
269 // generic interface
270
271 void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
272 void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
273
274 // specific implementations
275 // pre barriers
276
277 void G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
278 bool do_load, bool patch, CodeEmitInfo* info);
279
280 // post barriers
281
282 void G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
283 void CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
284 #ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
285 void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
286 #endif
287
288
289 static LIR_Opr result_register_for(ValueType* type, bool callee = false);
290
291 ciObject* get_jobject_constant(Value value);
292
293 LIRItemList* invoke_visit_arguments(Invoke* x);
294 void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
295
296 void trace_block_entry(BlockBegin* block);
297
298 // volatile field operations are never patchable because a klass
299 // must be loaded to know it's volatile which means that the offset
300 // is always known as well.
301 void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
302 void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
303
304 void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
305 void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);
306
|
256 void do_ArrayCopy(Intrinsic* x);
257 void do_CompareAndSwap(Intrinsic* x, ValueType* type);
258 void do_NIOCheckIndex(Intrinsic* x);
259 void do_FPIntrinsics(Intrinsic* x);
260 void do_Reference_get(Intrinsic* x);
261 void do_update_CRC32(Intrinsic* x);
262 void do_update_CRC32C(Intrinsic* x);
263 void do_vectorizedMismatch(Intrinsic* x);
264
265 LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
266 LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
267
268 // convenience functions
269 LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
270 LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
271
272 // GC Barriers
273
274 // generic interface
275
276 void access_store_at(DecoratorSet decorators, BasicType type,
277 LIRItem& base, LIR_Opr offset, LIR_Opr value,
278 CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info);
279
280 void access_load_at(DecoratorSet decorators, BasicType type,
281 LIRItem& base, LIR_Opr offset, LIR_Opr result,
282 CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info);
283
284 LIR_Opr access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
285 LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value);
286
287 LIR_Opr access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
288 LIRItem& base, LIRItem& offset, LIRItem& value);
289
290 LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
291 LIRItem& base, LIRItem& offset, LIRItem& value);
292
293 // These need to guarantee JMM volatile semantics are preserved on each platform
294 // and requires one implementation per architecture.
295 LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
296 LIR_Opr atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& new_value);
297 LIR_Opr atomic_add(BasicType type, LIR_Opr addr, LIRItem& new_value);
298
299 // specific implementations
300 void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci);
301
302 static LIR_Opr result_register_for(ValueType* type, bool callee = false);
303
304 ciObject* get_jobject_constant(Value value);
305
306 LIRItemList* invoke_visit_arguments(Invoke* x);
307 void invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list);
308
309 void trace_block_entry(BlockBegin* block);
310
311 // volatile field operations are never patchable because a klass
312 // must be loaded to know it's volatile which means that the offset
313 // is always known as well.
314 void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info);
315 void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info);
316
317 void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile);
318 void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile);
319
|
336
337 void new_instance (LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scra
338
339 // machine dependent
340 void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
341 void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info);
342
343 void arraycopy_helper(Intrinsic* x, int* flags, ciArrayKlass** expected_type);
344
345 // returns a LIR_Address to address an array location. May also
346 // emit some code as part of address calculation. If
347 // needs_card_mark is true then compute the full address for use by
348 // both the store and the card mark.
349 LIR_Address* generate_address(LIR_Opr base,
350 LIR_Opr index, int shift,
351 int disp,
352 BasicType type);
353 LIR_Address* generate_address(LIR_Opr base, int disp, BasicType type) {
354 return generate_address(base, LIR_OprFact::illegalOpr, 0, disp, type);
355 }
356 LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type, bool needs_card_mark);
357
358 // the helper for generate_address
359 void add_large_constant(LIR_Opr src, int c, LIR_Opr dest);
360
361 // machine preferences and characteristics
362 bool can_inline_as_constant(Value i S390_ONLY(COMMA int bits = 20)) const;
363 bool can_inline_as_constant(LIR_Const* c) const;
364 bool can_store_as_constant(Value i, BasicType type) const;
365
366 LIR_Opr safepoint_poll_register();
367
368 void profile_branch(If* if_instr, If::Condition cond);
369 void increment_event_counter_impl(CodeEmitInfo* info,
370 ciMethod *method, int frequency,
371 int bci, bool backedge, bool notify);
372 void increment_event_counter(CodeEmitInfo* info, int bci, bool backedge);
373 void increment_invocation_counter(CodeEmitInfo *info) {
374 if (compilation()->count_invocations()) {
375 increment_event_counter(info, InvocationEntryBci, false);
|
349
350 void new_instance (LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scra
351
352 // machine dependent
353 void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
354 void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info);
355
356 void arraycopy_helper(Intrinsic* x, int* flags, ciArrayKlass** expected_type);
357
358 // returns a LIR_Address to address an array location. May also
359 // emit some code as part of address calculation.
360 // NOTE(review): emit_array_address no longer takes a needs_card_mark
361 // flag; card marking is handled via the BarrierSetC1 access interface.
362 LIR_Address* generate_address(LIR_Opr base,
363 LIR_Opr index, int shift,
364 int disp,
365 BasicType type);
366 LIR_Address* generate_address(LIR_Opr base, int disp, BasicType type) {
367 return generate_address(base, LIR_OprFact::illegalOpr, 0, disp, type);
368 }
369 LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type);
370
371 // the helper for generate_address
372 void add_large_constant(LIR_Opr src, int c, LIR_Opr dest);
373
374 // machine preferences and characteristics
375 bool can_inline_as_constant(Value i S390_ONLY(COMMA int bits = 20)) const;
376 bool can_inline_as_constant(LIR_Const* c) const;
377 bool can_store_as_constant(Value i, BasicType type) const;
378
379 LIR_Opr safepoint_poll_register();
380
381 void profile_branch(If* if_instr, If::Condition cond);
382 void increment_event_counter_impl(CodeEmitInfo* info,
383 ciMethod *method, int frequency,
384 int bci, bool backedge, bool notify);
385 void increment_event_counter(CodeEmitInfo* info, int bci, bool backedge);
386 void increment_invocation_counter(CodeEmitInfo *info) {
387 if (compilation()->count_invocations()) {
388 increment_event_counter(info, InvocationEntryBci, false);
|
415 static LIR_Condition lir_cond(If::Condition cond) {
416 LIR_Condition l = lir_cond_unknown;
417 switch (cond) {
418 case If::eql: l = lir_cond_equal; break;
419 case If::neq: l = lir_cond_notEqual; break;
420 case If::lss: l = lir_cond_less; break;
421 case If::leq: l = lir_cond_lessEqual; break;
422 case If::geq: l = lir_cond_greaterEqual; break;
423 case If::gtr: l = lir_cond_greater; break;
424 case If::aeq: l = lir_cond_aboveEqual; break;
425 case If::beq: l = lir_cond_belowEqual; break;
426 default: fatal("You must pass valid If::Condition");
427 };
428 return l;
429 }
430
431 #ifdef __SOFTFP__
432 void do_soft_float_compare(If *x);
433 #endif // __SOFTFP__
434
435 void init();
436
437 SwitchRangeArray* create_lookup_ranges(TableSwitch* x);
438 SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
439 void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
440
441 #ifdef TRACE_HAVE_INTRINSICS
442 void do_ClassIDIntrinsic(Intrinsic* x);
443 void do_getBufferWriter(Intrinsic* x);
444 #endif
445
446 void do_RuntimeCall(address routine, Intrinsic* x);
447
448 ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
449 Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
450 ciKlass* callee_signature_k);
451 void profile_arguments(ProfileCall* x);
452 void profile_parameters(Base* x);
453 void profile_parameters_at_call(ProfileCall* x);
454 LIR_Opr maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
455
456 public:
457 Compilation* compilation() const { return _compilation; }
458 FrameMap* frame_map() const { return _compilation->frame_map(); }
459 ciMethod* method() const { return _method; }
460 BlockBegin* block() const { return _block; }
461 IRScope* scope() const { return block()->scope(); }
462
463 int max_virtual_register_number() const { return _virtual_register_number; }
464
465 void block_do(BlockBegin* block);
466
467 // Flags that can be set on vregs
468 enum VregFlag {
469 must_start_in_memory = 0 // needs to be assigned a memory location at beginning, but may then be loaded in a register
470 , callee_saved = 1 // must be in a callee saved register
471 , byte_reg = 2 // must be in a byte register
472 , num_vreg_flags
473
474 };
475
476 LIRGenerator(Compilation* compilation, ciMethod* method)
477 : _compilation(compilation)
478 , _method(method)
479 , _virtual_register_number(LIR_OprDesc::vreg_base)
480 , _vreg_flags(num_vreg_flags) {
481 init();
482 }
483
484 // for virtual registers, maps them back to Phi's or Local's
485 Instruction* instruction_for_opr(LIR_Opr opr);
486 Instruction* instruction_for_vreg(int reg_num);
487
488 void set_vreg_flag (int vreg_num, VregFlag f);
489 bool is_vreg_flag_set(int vreg_num, VregFlag f);
490 void set_vreg_flag (LIR_Opr opr, VregFlag f) { set_vreg_flag(opr->vreg_number(), f); }
491 bool is_vreg_flag_set(LIR_Opr opr, VregFlag f) { return is_vreg_flag_set(opr->vreg_number(), f); }
492
493 // statics
494 static LIR_Opr exceptionOopOpr();
495 static LIR_Opr exceptionPcOpr();
496 static LIR_Opr divInOpr();
497 static LIR_Opr divOutOpr();
498 static LIR_Opr remOutOpr();
499 #ifdef S390
500 // On S390 we can do ldiv, lrem without RT call.
|
428 static LIR_Condition lir_cond(If::Condition cond) {
429 LIR_Condition l = lir_cond_unknown;
430 switch (cond) {
431 case If::eql: l = lir_cond_equal; break;
432 case If::neq: l = lir_cond_notEqual; break;
433 case If::lss: l = lir_cond_less; break;
434 case If::leq: l = lir_cond_lessEqual; break;
435 case If::geq: l = lir_cond_greaterEqual; break;
436 case If::gtr: l = lir_cond_greater; break;
437 case If::aeq: l = lir_cond_aboveEqual; break;
438 case If::beq: l = lir_cond_belowEqual; break;
439 default: fatal("You must pass valid If::Condition");
440 };
441 return l;
442 }
443
444 #ifdef __SOFTFP__
445 void do_soft_float_compare(If *x);
446 #endif // __SOFTFP__
447
448 SwitchRangeArray* create_lookup_ranges(TableSwitch* x);
449 SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
450 void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
451
452 #ifdef TRACE_HAVE_INTRINSICS
453 void do_ClassIDIntrinsic(Intrinsic* x);
454 void do_getBufferWriter(Intrinsic* x);
455 #endif
456
457 void do_RuntimeCall(address routine, Intrinsic* x);
458
459 ciKlass* profile_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k,
460 Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
461 ciKlass* callee_signature_k);
462 void profile_arguments(ProfileCall* x);
463 void profile_parameters(Base* x);
464 void profile_parameters_at_call(ProfileCall* x);
465 LIR_Opr mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
466 LIR_Opr maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
467
468 public:
469 Compilation* compilation() const { return _compilation; }
470 FrameMap* frame_map() const { return _compilation->frame_map(); }
471 ciMethod* method() const { return _method; }
472 BlockBegin* block() const { return _block; }
473 IRScope* scope() const { return block()->scope(); }
474
475 int max_virtual_register_number() const { return _virtual_register_number; }
476
477 void block_do(BlockBegin* block);
478
479 // Flags that can be set on vregs
480 enum VregFlag {
481 must_start_in_memory = 0 // needs to be assigned a memory location at beginning, but may then be loaded in a register
482 , callee_saved = 1 // must be in a callee saved register
483 , byte_reg = 2 // must be in a byte register
484 , num_vreg_flags
485
486 };
487
488 LIRGenerator(Compilation* compilation, ciMethod* method)
489 : _compilation(compilation)
490 , _method(method)
491 , _virtual_register_number(LIR_OprDesc::vreg_base)
492 , _vreg_flags(num_vreg_flags) {
493 }
494
495 // for virtual registers, maps them back to Phi's or Local's
496 Instruction* instruction_for_opr(LIR_Opr opr);
497 Instruction* instruction_for_vreg(int reg_num);
498
499 void set_vreg_flag (int vreg_num, VregFlag f);
500 bool is_vreg_flag_set(int vreg_num, VregFlag f);
501 void set_vreg_flag (LIR_Opr opr, VregFlag f) { set_vreg_flag(opr->vreg_number(), f); }
502 bool is_vreg_flag_set(LIR_Opr opr, VregFlag f) { return is_vreg_flag_set(opr->vreg_number(), f); }
503
504 // statics
505 static LIR_Opr exceptionOopOpr();
506 static LIR_Opr exceptionPcOpr();
507 static LIR_Opr divInOpr();
508 static LIR_Opr divOutOpr();
509 static LIR_Opr remOutOpr();
510 #ifdef S390
511 // On S390 we can do ldiv, lrem without RT call.
|