< prev index next >

src/hotspot/cpu/arm/macroAssembler_arm.hpp

Print this page
rev 52777 : 8214512: ARM32: Jtreg test compiler/c2/Test8062950.java fails on ARM
Summary: fix assertion failures with -XX:-OptoBiasInlining
Reviewed-by: duke
Contributed-by: nick.gasson@arm.com


 354                      RegisterOrConstant size_expression, Label& slow_case);
 355   void tlab_allocate(Register obj, Register obj_end, Register tmp1,
 356                      RegisterOrConstant size_expression, Label& slow_case);
 357 
 358   void zero_memory(Register start, Register end, Register tmp);
 359 
 360   static bool needs_explicit_null_check(intptr_t offset);
 361   static bool uses_implicit_null_check(void* address);
 362 
 363   void arm_stack_overflow_check(int frame_size_in_bytes, Register tmp);
 364   void arm_stack_overflow_check(Register Rsize, Register tmp);
 365 
 366   void bang_stack_with_offset(int offset) {
       // The x86-style per-offset stack-bang hook is not used on ARM32;
       // stack overflow is checked via arm_stack_overflow_check() above,
       // so reaching this method indicates a porting error.
 367     ShouldNotReachHere();
 368   }
 369 
 370   // Biased locking support
 371   // lock_reg and obj_reg must be loaded up with the appropriate values.
 372   // swap_reg must be supplied.
 373   // tmp_reg must be supplied.
 374   // Optional slow case is for implementations (interpreter and C1) which branch to
 375   // slow case directly. If slow_case is NULL, then leaves condition
 376   // codes set (for C2's Fast_Lock node) and jumps to done label.
 377   // Falls through for the fast locking attempt.
 378   // Returns offset of first potentially-faulting instruction for null
 379   // check info (currently consumed only by C1). If
 380   // swap_reg_contains_mark is true then returns -1 as it is assumed
 381   // the calling code has already passed any potential faults.
 382   // Notes:
 383   // - swap_reg and tmp_reg are scratched
 384   // - Rtemp was (implicitly) scratched and can now be specified as the tmp2
 385   int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
 386                            bool swap_reg_contains_mark,
 387                            Register tmp2,
 388                            Label& done, Label& slow_case,
 389                            BiasedLockingCounters* counters = NULL);
 390   void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
 391 
 392   // Building block for CAS cases of biased locking: makes CAS and records statistics.
 393   // Optional slow_case label is used to transfer control if CAS fails. Otherwise leaves condition codes set.
 394   void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
 395                                      Register tmp, Label& slow_case, int* counter_addr);
 396 
 397   void resolve_jobject(Register value, Register tmp1, Register tmp2);


1056                           Register chr1, Register chr2, Label& Ldone);
1057 
1058 
1059   void floating_cmp(Register dst);
1060 
1061   // improved x86 portability (minimizing source code changes)
1062 
1063   void ldr_literal(Register rd, AddressLiteral addr) {
       // Record the relocation for the literal's address, then emit a
       // PC-relative load of the literal into rd.
1064     relocate(addr.rspec());
       // "- 8" compensates for the ARM convention that reading PC yields
       // the address of the current instruction plus 8 (A32 state).
1065     ldr(rd, Address(PC, addr.target() - pc() - 8));
1066   }
1067 
1068   void lea(Register Rd, AddressLiteral addr) {
       // Materialize the literal's address (not its contents) into Rd,
       // mirroring x86 lea semantics for shared code paths.
1069     // Never dereferenced, as on x86 (lval status ignored)
1070     mov_address(Rd, addr.target(), addr.rspec());
1071   }
1072 
1073   void restore_default_fp_mode();
1074 
1075 #ifdef COMPILER2
1076   void fast_lock(Register obj, Register box, Register scratch, Register scratch2);
1077   void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
1078 #endif
1079 
1080 
1081 };
1082 
1083 
1084 // The purpose of this class is to build several code fragments of the same size
1085 // in order to allow fast table branch.
1086 
1087 class FixedSizeCodeBlock {
1088 public:
1089   FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled);
1090   ~FixedSizeCodeBlock();
1091 
1092 private:
1093   MacroAssembler* _masm;
1094   address _start;
1095   int _size_in_instrs;
1096   bool _enabled;


 354                      RegisterOrConstant size_expression, Label& slow_case);
 355   void tlab_allocate(Register obj, Register obj_end, Register tmp1,
 356                      RegisterOrConstant size_expression, Label& slow_case);
 357 
 358   void zero_memory(Register start, Register end, Register tmp);
 359 
 360   static bool needs_explicit_null_check(intptr_t offset);
 361   static bool uses_implicit_null_check(void* address);
 362 
 363   void arm_stack_overflow_check(int frame_size_in_bytes, Register tmp);
 364   void arm_stack_overflow_check(Register Rsize, Register tmp);
 365 
 366   void bang_stack_with_offset(int offset) {
       // The x86-style per-offset stack-bang hook is not used on ARM32;
       // stack overflow is checked via arm_stack_overflow_check() above,
       // so reaching this method indicates a porting error.
 367     ShouldNotReachHere();
 368   }
 369 
 370   // Biased locking support
 371   // lock_reg and obj_reg must be loaded up with the appropriate values.
 372   // swap_reg must be supplied.
 373   // tmp_reg must be supplied.
 374   // Done label is branched to with condition code EQ set if the lock is
 375   // biased and we acquired it. Slow case label is branched to with
 376   // condition code NE set if the lock is biased but we failed to acquire
 377   // it. Otherwise fall through.
 378   // Returns offset of first potentially-faulting instruction for null
 379   // check info (currently consumed only by C1). If
 380   // swap_reg_contains_mark is true then returns -1 as it is assumed
 381   // the calling code has already passed any potential faults.
 382   // Notes:
 383   // - swap_reg and tmp_reg are scratched
 384   // - Rtemp was (implicitly) scratched and can now be specified as the tmp2
 385   int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
 386                            bool swap_reg_contains_mark,
 387                            Register tmp2,
 388                            Label& done, Label& slow_case,
 389                            BiasedLockingCounters* counters = NULL);
 390   void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
 391 
 392   // Building block for CAS cases of biased locking: makes CAS and records statistics.
 393   // Optional slow_case label is used to transfer control if CAS fails. Otherwise leaves condition codes set.
 394   void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
 395                                      Register tmp, Label& slow_case, int* counter_addr);
 396 
 397   void resolve_jobject(Register value, Register tmp1, Register tmp2);


1056                           Register chr1, Register chr2, Label& Ldone);
1057 
1058 
1059   void floating_cmp(Register dst);
1060 
1061   // improved x86 portability (minimizing source code changes)
1062 
1063   void ldr_literal(Register rd, AddressLiteral addr) {
       // Record the relocation for the literal's address, then emit a
       // PC-relative load of the literal into rd.
1064     relocate(addr.rspec());
       // "- 8" compensates for the ARM convention that reading PC yields
       // the address of the current instruction plus 8 (A32 state).
1065     ldr(rd, Address(PC, addr.target() - pc() - 8));
1066   }
1067 
1068   void lea(Register Rd, AddressLiteral addr) {
       // Materialize the literal's address (not its contents) into Rd,
       // mirroring x86 lea semantics for shared code paths.
1069     // Never dereferenced, as on x86 (lval status ignored)
1070     mov_address(Rd, addr.target(), addr.rspec());
1071   }
1072 
1073   void restore_default_fp_mode();
1074 
1075 #ifdef COMPILER2
1076   void fast_lock(Register obj, Register box, Register scratch, Register scratch2, Register scratch3 = noreg);
1077   void fast_unlock(Register obj, Register box, Register scratch, Register scratch2);
1078 #endif
1079 
1080 
1081 };
1082 
1083 
1084 // The purpose of this class is to build several code fragments of the same size
1085 // in order to allow fast table branch.
1086 
1087 class FixedSizeCodeBlock {
1088 public:
1089   FixedSizeCodeBlock(MacroAssembler* masm, int size_in_instrs, bool enabled);
1090   ~FixedSizeCodeBlock();
1091 
1092 private:
1093   MacroAssembler* _masm;
1094   address _start;
1095   int _size_in_instrs;
1096   bool _enabled;
< prev index next >