src/cpu/x86/vm/macroAssembler_x86.hpp
Bug: 8033805

 634   void verify_tlab();
 635 
 636   // Biased locking support
 637   // lock_reg and obj_reg must be loaded up with the appropriate values.
 638   // swap_reg must be rax, and is killed.
 639   // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
 640   // be killed; if not supplied, push/pop will be used internally to
 641   // allocate a temporary (inefficient, avoid if possible).
 642   // Optional slow case is for implementations (interpreter and C1) which branch to
 643   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 644   // Returns offset of first potentially-faulting instruction for null
 645   // check info (currently consumed only by C1). If
 646   // swap_reg_contains_mark is true then returns -1 as it is assumed
 647   // the calling code has already passed any potential faults.
 648   int biased_locking_enter(Register lock_reg, Register obj_reg,
 649                            Register swap_reg, Register tmp_reg,
 650                            bool swap_reg_contains_mark,
 651                            Label& done, Label* slow_case = NULL,
 652                            BiasedLockingCounters* counters = NULL);
 653   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
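
As a reading aid for the contract documented above, a minimal call-site sketch follows. It is hypothetical: the masm, lock_reg, obj_reg and tmp_reg names are placeholders, and only the constraints stated in the comment are taken as authoritative.

    // Hypothetical interpreter/C1-style call site for biased_locking_enter.
    Label done, slow_case;
    int null_check_offset =
        masm->biased_locking_enter(lock_reg,    // loaded with the lock address
                                   obj_reg,     // loaded with the object to bias-lock
                                   rax,         // swap_reg: must be rax, is killed
                                   tmp_reg,     // killed; noreg would force an internal push/pop temp
                                   false,       // swap_reg does not already hold the mark word
                                   done,        // fall-through target on success
                                   &slow_case); // explicit slow-path branch target
    // Had swap_reg_contains_mark been true, the call would return -1 instead
    // of the offset of the first potentially-faulting instruction.
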
 654 #ifdef COMPILER2
 655   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
 656   // See full description in macroAssembler_x86.cpp.
 657   void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
 658   void fast_unlock(Register obj, Register box, Register tmp);
 659 #endif
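
The two new entry points are consumed by C2's generated code. A hedged sketch of the paired emission (the register names are placeholders; the flags contract comes from the "Leaves condition codes set for C2's Fast_Lock node" remark above):

    // Hypothetical emission in the spirit of cmpFastLock/cmpFastUnlock.
    masm->fast_lock(obj_reg,    // object being locked
                    box_reg,    // on-stack lock box
                    tmp_reg,    // scratch, killed
                    scr_reg,    // second scratch, killed
                    counters);  // BiasedLockingCounters*, may be NULL
    // C2 branches on the condition codes fast_lock leaves set; the matching
    // unlock path emits:
    masm->fast_unlock(obj_reg, box_reg, tmp_reg);
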
 660 
 661   Condition negate_condition(Condition cond);
 662 
 663   // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
 664   // operands. In general the names are modified to avoid hiding the instruction in Assembler,
 665   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 666   // here in MacroAssembler. The major exception to this rule is call.
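
The "hiding" the comment refers to is ordinary C++ name hiding: declaring any overload in a derived class hides every same-named overload in the base. A self-contained toy (all names invented):

    struct Assembler {
      void op(int)  {}
      void op(long) {}
    };
    struct MacroAssembler : Assembler {
      void op(char*) {}   // hides BOTH Assembler::op overloads
    };
    // MacroAssembler().op(1);  // error without 'using Assembler::op;'
    //                          // hence the modified names instead
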
 667 
 668   // Arithmetic
 669 
 670 
 671   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 672   void addptr(Address dst, Register src);
 673 
 674   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 675   void addptr(Register dst, int32_t src);
 676   void addptr(Register dst, Register src);
 677   void addptr(Register dst, RegisterOrConstant src) {
 678     if (src.is_constant()) addptr(dst, (int) src.as_constant());
 679     else                   addptr(dst,       src.as_register());
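
The addptr family above dispatches on word size through HotSpot's LP64_ONLY/NOT_LP64 macros. The standalone demo below reproduces that shape; the macro definitions match HotSpot's, while addptr_demo itself is invented for illustration.

    #include <cstdint>
    #include <cstdio>

    // Each macro keeps its argument on the matching build and erases it on
    // the other, so exactly one statement survives preprocessing.
    #ifdef _LP64
    # define LP64_ONLY(code) code
    # define NOT_LP64(code)
    #else
    # define LP64_ONLY(code)
    # define NOT_LP64(code) code
    #endif

    // One "pointer-sized add", two underlying widths -- echoing how
    // addptr picks addq on 64-bit builds and addl on 32-bit builds.
    static void addptr_demo(intptr_t& dst, int32_t src) {
      LP64_ONLY(dst += (intptr_t)src;)        // 64-bit path ("addq")
      NOT_LP64(dst = (int32_t)(dst + src);)   // 32-bit path ("addl")
    }

    int main() {
      intptr_t v = 40;
      addptr_demo(v, 2);
      printf("%ld\n", (long)v);               // prints 42 on either build
      return 0;
    }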

