src/hotspot/cpu/arm/macroAssembler_arm.hpp

  // Returns the offset of the first potentially-faulting instruction for null
  // check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1, as it is assumed
  // the calling code has already passed any potential faults.
  // Notes:
  // - swap_reg and tmp_reg are scratched
  // - Rtemp was (implicitly) scratched and can now be specified as tmp2
  int biased_locking_enter(Register obj_reg, Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Register tmp2,
                           Label& done, Label& slow_case,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
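For orientation, the fast path these emitters produce amounts to a compare-and-swap on the object's mark word: if the mark is still biasable, the entering thread tries to install its own id. A minimal, self-contained C++ sketch of that idea, using std::atomic in place of generated ARM code (the bit-layout constants are simplified stand-ins, not HotSpot's real mark-word encoding):

    #include <atomic>
    #include <cstdint>

    // Simplified stand-ins for the mark-word bias encoding (illustrative only).
    constexpr uintptr_t kBiasPattern = 0x5;   // low bits marking a biasable word
    constexpr uintptr_t kBiasMask    = 0x7;
    constexpr uintptr_t kThreadMask  = ~kBiasMask;

    // Conceptual biased_locking_enter fast path; thread_id is assumed aligned
    // so that (thread_id & kBiasMask) == 0.
    bool biased_enter(std::atomic<uintptr_t>& mark_word, uintptr_t thread_id) {
      uintptr_t mark = mark_word.load(std::memory_order_relaxed);
      if ((mark & kBiasMask) != kBiasPattern)
        return false;                        // mark not biasable: take the slow path
      if ((mark & kThreadMask) == thread_id)
        return true;                         // already biased toward this thread
      if ((mark & kThreadMask) != 0)
        return false;                        // biased elsewhere: revocation, slow path
      uintptr_t expected = mark;
      return mark_word.compare_exchange_strong(expected, kBiasPattern | thread_id);
    }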

  // Building block for the CAS cases of biased locking: performs the CAS and
  // records statistics. The optional slow_case label receives control if the
  // CAS fails; otherwise the condition codes are left set for the caller.
  void biased_locking_enter_with_cas(Register obj_reg, Register old_mark_reg, Register new_mark_reg,
                                     Register tmp, Label& slow_case, int* counter_addr);
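The building block above is, conceptually, just the CAS plus a conditional counter bump; a hedged sketch of that shape (when exactly the counter is incremented is an assumption here, and counter_addr mirrors the plain int* in the declaration):

    #include <atomic>
    #include <cstdint>

    // Conceptual shape of 'CAS and record statistics': attempt the exchange
    // and, when a counter address was supplied, record the outcome.
    bool cas_and_count(std::atomic<uintptr_t>& word, uintptr_t old_mark,
                       uintptr_t new_mark, int* counter_addr) {
      bool ok = word.compare_exchange_strong(old_mark, new_mark);
      if (ok && counter_addr != nullptr)
        ++*counter_addr;   // plain increment, matching the int* type above
      return ok;           // the emitted code leaves condition codes set instead
    }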

  // Decodes the jobject in 'value' into the oop it refers to,
  // handling null and weak handles.
  void resolve_jobject(Register value, Register tmp1, Register tmp2);
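resolve_jobject converts a JNI handle, i.e. a pointer to an oop slot, back into the object pointer; HotSpot distinguishes weak global handles by tagging the handle's low bits. A simplified, standalone sketch of that decoding (the tag value and the omitted keep-alive step are assumptions for illustration):

    #include <cstdint>

    using oop = void*;

    constexpr uintptr_t kWeakTag = 1;   // assumed low-bit tag on weak handles

    // Conceptual resolve_jobject: null-check the handle, strip the weak tag,
    // and load the oop out of the slot the handle points at.
    oop resolve_jobject_sketch(uintptr_t handle) {
      if (handle == 0)
        return nullptr;                 // a null handle resolves to null
      oop* slot = reinterpret_cast<oop*>(handle & ~kWeakTag);
      oop result = *slot;
      // A weak handle would additionally need a GC keep-alive barrier here
      // (cf. the G1 pre-barrier below); omitted in this sketch.
      return result;
    }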

-#if INCLUDE_ALL_GCS
+#if INCLUDE_G1GC
  // G1 pre-barrier.
  // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
  // If store_addr != noreg, then the previous value is loaded from [store_addr];
  // in that case the store_addr and new_val registers are preserved;
  // otherwise the pre_val register is preserved.
  void g1_write_barrier_pre(Register store_addr,
                            Register new_val,
                            Register pre_val,
                            Register tmp1,
                            Register tmp2);
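The pre-barrier enforces G1's snapshot-at-the-beginning invariant: while concurrent marking runs, the value about to be overwritten is recorded so the marker still visits it. A minimal plain-C++ sketch of that logic (the queue and flag are illustrative stand-ins for HotSpot's per-thread SATB structures):

    #include <vector>

    using oop = void*;

    // Illustrative per-thread SATB state; the real buffers are drained by
    // the concurrent marking threads.
    struct SatbQueue {
      bool active = false;              // set while concurrent marking is running
      std::vector<oop> buffer;
    };

    // Conceptual g1_write_barrier_pre: if marking is active and the previous
    // value is non-null, enqueue it for the marker.
    void g1_pre_barrier(SatbQueue& q, oop* store_addr) {
      if (!q.active)
        return;                         // fast path: no concurrent marking
      oop previous = *store_addr;       // "previous value is loaded from [store_addr]"
      if (previous != nullptr)
        q.buffer.push_back(previous);
    }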

  // G1 post-barrier.
  // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
  void g1_write_barrier_post(Register store_addr,
                             Register new_val,
                             Register tmp1,
                             Register tmp2,
                             Register tmp3);
-#endif // INCLUDE_ALL_GCS
+#endif // INCLUDE_G1GC

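The post-barrier above maintains G1's remembered sets: after a store, if the new value lives in a different heap region than the updated field, the field's card is dirtied so it will be rescanned. A simplified sketch (the region size is an assumption; 512-byte cards and a dirty value of 0 follow HotSpot's defaults, but the real barrier performs additional filtering not shown here):

    #include <cstdint>

    constexpr uintptr_t kRegionShift = 20;  // assumed 1 MB regions (illustrative)
    constexpr uintptr_t kCardShift   = 9;   // 512-byte cards
    constexpr uint8_t   kDirtyCard   = 0;   // dirty-card value

    // Conceptual g1_write_barrier_post: dirty the card of the updated field
    // when the store creates a cross-region reference.
    void g1_post_barrier(uint8_t* card_table, uintptr_t store_addr, uintptr_t new_val) {
      if (new_val == 0)
        return;                             // null stores create no reference
      if ((store_addr >> kRegionShift) == (new_val >> kRegionShift))
        return;                             // same region: no remembered-set work
      card_table[store_addr >> kCardShift] = kDirtyCard;
    }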
#ifndef AARCH64
  // 'mov r0, r0' is the traditional ARM no-op encoding.
  void nop() {
    mov(R0, R0);
  }

  void push(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    // Pre-indexed store: 'str rd, [sp, #-wordSize]!' -- decrement SP, then store.
    str(rd, Address(SP, -wordSize, pre_indexed), cond);
  }

  void push(RegisterSet reg_set, AsmCondition cond = al) {
    assert(!reg_set.contains(SP), "unpredictable instruction");
    // Store-multiple, decrement-before, with writeback: 'stmdb sp!, {reg_set}'.
    stmdb(SP, reg_set, writeback, cond);
  }

  void pop(Register rd, AsmCondition cond = al) {
    assert(rd != SP, "unpredictable instruction");
    // Post-indexed load: 'ldr rd, [sp], #wordSize' -- load, then increment SP.
    ldr(rd, Address(SP, wordSize, post_indexed), cond);
  }
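Taken together, push and pop implement the usual full-descending stack discipline: push pre-decrements SP and stores, pop loads and post-increments, so pops must mirror the pushes in reverse order. A standalone C++ model of that behaviour:

    #include <cassert>
    #include <cstdint>

    // Toy model of a full-descending stack: SP points at the last pushed word.
    struct Stack {
      uintptr_t mem[16];
      uintptr_t* sp = mem + 16;

      void push(uintptr_t v) { *--sp = v; }    // like 'str rd, [sp, #-wordSize]!'
      uintptr_t pop()        { return *sp++; } // like 'ldr rd, [sp], #wordSize'
    };

    int main() {
      Stack s;
      s.push(1);
      s.push(2);
      assert(s.pop() == 2);  // pops reverse the push order
      assert(s.pop() == 1);
      return 0;
    }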