  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    assert(offset > 0, "must bang with negative offset");
    movl(Address(rsp, (-offset)), rax);
  }
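
  // Illustrative usage (a sketch, not part of this header): a frame-setup
  // path could bang the page just below rsp to provoke any stack-overflow
  // fault eagerly; the one-page distance is an assumption for illustration.
  //
  //   bang_stack_with_offset(os::vm_page_size());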

  // Writes to successive stack pages until the given size is reached, to
  // check for stack overflow plus shadow pages. Clobbers tmp.
  void bang_stack_size(Register size, Register tmp);
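
  // Conceptually (a hedged sketch of the emitted loop, not the actual
  // implementation), the generated code touches one word per page, moving
  // down a page at a time until the requested size is covered:
  //
  //   for (pages = size / page_size; pages > 0; pages--) {
  //     store to (rsp - pages * page_size);  // faults if a guard page is hit
  //   }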

  // Check for reserved stack access in the method being exited (for JIT)
  void reserved_stack_check();
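
  // A hedged sketch of the emitted check (branch sense simplified): compare
  // rsp against the thread's reserved-stack watermark and, only in the
  // unlikely case the reserved zone was entered, jump to a runtime stub that
  // re-enables the guard:
  //
  //   cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset()));
  //   // taken only when the reserved zone has been touched:
  //   //   call the SharedRuntime stub, then continue the normal exit path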

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);

  // Support for serializing memory accesses between threads
  void serialize_memory(Register thread, Register tmp);
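
  // Background (hedged summary): this historically worked by storing into a
  // thread-indexed slot of a shared "memory serialize page"; to fence all
  // threads, the VM protects that page and handles the resulting faults.
  // Roughly (the slot derivation below is descriptive, not literal):
  //
  //   movl(tmp, thread);                 // derive a per-thread slot index
  //   shrl(tmp, page_shift_count);
  //   andl(tmp, page_mask);
  //   movl(slot_on_serialize_page, tmp); // the pseudo-serializing store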

#ifdef _LP64
  void safepoint_poll(Label& slow_path, Register thread_reg, Register temp_reg);
#else
  void safepoint_poll(Label& slow_path);
#endif
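
  // Illustrative use (a sketch; the register choices are assumptions):
  //
  //   Label slow_path, done;
  // #ifdef _LP64
  //   safepoint_poll(slow_path, r15_thread, rscratch1);
  // #else
  //   safepoint_poll(slow_path);
  // #endif
  //   jmp(done);
  //   bind(slow_path);
  //   // ... call into the VM to block for the safepoint ...
  //   bind(done);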

  void verify_tlab();

  // Biased locking support
  // lock_reg and obj_reg must be loaded up with the appropriate values.
  // swap_reg must be rax, and is killed.
  // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
  // be killed; if not supplied, push/pop will be used internally to
  // allocate a temporary (inefficient, avoid if possible).
  // Optional slow case is for implementations (interpreter and C1) which
  // branch to the slow case directly. Leaves condition codes set for C2's
  // Fast_Lock node. Returns the offset of the first potentially-faulting
  // instruction for null check info (currently consumed only by C1). If
  // swap_reg_contains_mark is true then returns -1, as it is assumed the
  // calling code has already passed any potential faults.
  int biased_locking_enter(Register lock_reg, Register obj_reg,
                           Register swap_reg, Register tmp_reg,
                           bool swap_reg_contains_mark,
                           Label& done, Label* slow_case = NULL,
                           BiasedLockingCounters* counters = NULL);
  void biased_locking_exit(Register obj_reg, Register temp_reg, Label& done);
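
  // Illustrative pairing (a sketch; register choices other than swap_reg ==
  // rax are assumptions, and the surrounding fast/slow paths are elided):
  //
  //   Label done, slow_case;
  //   int null_check_offset =
  //       biased_locking_enter(lock_reg, obj_reg, rax, tmp_reg,
  //                            false /* swap_reg_contains_mark */,
  //                            done, &slow_case, NULL);
  //   // ... fall through to the CAS-based locking path ...
  //   bind(done);
  //   ...
  //   biased_locking_exit(obj_reg, tmp_reg, done_unlocking);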