src/cpu/x86/vm/macroAssembler_x86.hpp
*** old/src/cpu/x86/vm/macroAssembler_x86.hpp Mon Mar 24 16:31:35 2014
--- new/src/cpu/x86/vm/macroAssembler_x86.hpp Mon Mar 24 16:31:34 2014
*** 25,34 ****
--- 25,35 ----
#ifndef CPU_X86_VM_MACROASSEMBLER_X86_HPP
#define CPU_X86_VM_MACROASSEMBLER_X86_HPP
#include "asm/assembler.hpp"
#include "utilities/macros.hpp"
+ #include "runtime/rtmLocking.hpp"
// MacroAssembler extends Assembler by frequently used macros.
//
// Instructions for which a 'better' code sequence exists depending
*** 109,129 ****
--- 110,131 ----
unsigned char op = branch[0];
assert(op == 0xE8 /* call */ ||
op == 0xE9 /* jmp */ ||
op == 0xEB /* short jmp */ ||
(op & 0xF0) == 0x70 /* short jcc */ ||
- op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
+ op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */ ||
+ op == 0xC7 && branch[1] == 0xF8 /* xbegin */,
"Invalid opcode at patch point");
if (op == 0xEB || (op & 0xF0) == 0x70) {
// short offset operators (jmp and jcc)
char* disp = (char*) &branch[1];
int imm8 = target - (address) &disp[1];
guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
*disp = imm8;
} else {
- int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
+ int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
int imm32 = target - (address) &disp[1];
*disp = imm32;
}
}
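
The new case admits xbegin, which RTM locking uses as a patchable forward branch to its abort handler. A stand-alone sketch of the displacement arithmetic (hypothetical buffer and target, not HotSpot code): like the two-byte 0x0F jcc, xbegin (0xC7 0xF8) carries its rel32 starting at byte offset 2.

    #include <cstdint>
    #include <cstring>

    // Sketch: patch a rel32 jcc/jmp/call/xbegin displacement in place.
    static void patch_rel32(unsigned char* branch, unsigned char* target) {
      unsigned char op = branch[0];
      // two-byte opcodes (0F 8x jcc, C7 F8 xbegin) put the rel32 at offset 2
      int off = (op == 0x0F || op == 0xC7) ? 2 : 1;
      // the displacement is relative to the end of the instruction
      int32_t rel = (int32_t)(target - (branch + off + 4));
      memcpy(branch + off, &rel, sizeof(rel));  // x86 is little-endian
    }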
*** 159,169 ****
--- 161,170 ----
void incrementl(Register reg, int value = 1);
void incrementq(Register reg, int value = 1);
void incrementq(Address dst, int value = 1);
// Support optimal SSE move instructions.
void movflt(XMMRegister dst, XMMRegister src) {
if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
else { movss (dst, src); return; }
}
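
For context, movflt prefers movaps for register-to-register moves when UseXmmRegToRegMoveAll is set: movss writes only the low 32 bits of the destination and therefore carries a dependency on its previous contents, while movaps rewrites the whole register and breaks it. A small GCC/Clang x86-64 inline-asm sketch of the two forms (illustrative only, not HotSpot code):

    // movaps copies all 128 bits; the destination's old value is dead.
    float via_movaps(float x) {
      float r;
      __asm__("movaps %1, %0" : "=x"(r) : "x"(x));
      return r;
    }

    // movss merges into the low 32 bits, so the destination is in-out.
    float via_movss(float x) {
      float r = 0.0f;
      __asm__("movss %1, %0" : "+x"(r) : "x"(x));
      return r;
    }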
*** 185,194 ****
--- 186,197 ----
void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
void incrementl(AddressLiteral dst);
void incrementl(ArrayAddress dst);
+ void incrementq(AddressLiteral dst);
+
// Alignment
void align(int modulus);
// A 5 byte nop that is safe for patching (see patch_verified_entry)
void fat_nop();
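
fat_nop's contract is a single five-byte instruction, so patch_verified_entry can later overwrite it with a five-byte jump without any thread observing a torn instruction boundary. A sketch using one standard five-byte NOP encoding (illustrative bytes; the exact encoding HotSpot emits may differ):

    #include <cstdint>
    #include <cstring>

    // "0F 1F 44 00 00" is the Intel-recommended 5-byte NOP,
    // i.e. nopl 0x0(%rax,%rax,1): one instruction, five bytes.
    static const uint8_t NOP5[5] = { 0x0F, 0x1F, 0x44, 0x00, 0x00 };

    void emit_fat_nop(uint8_t* pos) { memcpy(pos, NOP5, sizeof(NOP5)); }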
*** 652,663 ****
--- 655,694 ----
BiasedLockingCounters* counters = NULL);
void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
#ifdef COMPILER2
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
// See full desription in macroAssembler_x86.cpp.
- void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
- void fast_unlock(Register obj, Register box, Register tmp);
+ void fast_lock(Register obj, Register box, Register tmp,
+                Register scr, Register cx1, Register cx2,
+                BiasedLockingCounters* counters,
+                RTMLockingCounters* rtm_counters,
+                RTMLockingCounters* stack_rtm_counters,
+                Metadata* method_data,
+                bool use_rtm, bool profile_rtm);
+ void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
+ #if INCLUDE_RTM_OPT
+ void rtm_counters_update(Register abort_status, Register rtm_counters);
+ void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
+ void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
+ RTMLockingCounters* rtm_counters,
+ Metadata* method_data);
+ void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
+ RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
+ void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
+ void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);
+ void rtm_stack_locking(Register obj, Register tmp, Register scr,
+ Register retry_on_abort_count,
+ RTMLockingCounters* stack_rtm_counters,
+ Metadata* method_data, bool profile_rtm,
+ Label& DONE_LABEL, Label& IsInflated);
+ void rtm_inflated_locking(Register obj, Register box, Register tmp,
+ Register scr, Register retry_on_busy_count,
+ Register retry_on_abort_count,
+ RTMLockingCounters* rtm_counters,
+ Metadata* method_data, bool profile_rtm,
+ Label& DONE_LABEL);
+ #endif
#endif
Condition negate_condition(Condition cond);
// Instructions that use AddressLiteral operands. These instruction can handle 32bit/64bit
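
The RTM entry points declared above follow a speculate-retry-fallback scheme: try to elide the lock inside a hardware transaction, retry a bounded number of times on transient aborts, then fall back to the conventional lock. A minimal stand-alone sketch of that scheme using the compiler's RTM intrinsics (hypothetical lock word and retry policy, not HotSpot's generated code; build with -mrtm):

    #include <immintrin.h>  // _xbegin/_xend/_xabort

    static volatile long lock_word = 0;  // 0 = unlocked

    bool rtm_try_elide(int retry_on_abort_count) {
      for (int i = 0; i < retry_on_abort_count; i++) {
        unsigned status = _xbegin();
        if (status == _XBEGIN_STARTED) {
          if (lock_word == 0) {
            // Lock appears free; stay transactional. Reading the lock
            // word puts it in the read set, so a real acquisition by
            // another thread aborts this transaction automatically.
            return true;  // the matching unlock commits via _xend()
          }
          _xabort(0xff);  // lock already held: abort explicitly
        }
        // retry only when the hardware says a retry may succeed
        if ((status & _XABORT_RETRY) == 0) break;
      }
      return false;       // caller takes the conventional lock
    }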
*** 719,728 ****
--- 750,760 ----
void locked_cmpxchgptr(Register reg, AddressLiteral adr);
void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
+ void imulptr(Register dst, Register src, int imm32) { LP64_ONLY(imulq(dst, src, imm32)) NOT_LP64(imull(dst, src, imm32)); }
void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
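
The new imulptr overload dispatches on pointer width the same way as its neighbors: LP64_ONLY and NOT_LP64 (from utilities/macros.hpp) expand to exactly one of their arguments, so each *ptr method compiles down to a single l- or q-form instruction:

    // Effect of the dispatch macros used above:
    #ifdef _LP64
    #define LP64_ONLY(code) code
    #define NOT_LP64(code)
    #else
    #define LP64_ONLY(code)
    #define NOT_LP64(code) code
    #endif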
*** 760,770 ****
--- 792,809 ----
// Helper functions for statistics gathering.
// Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
void cond_inc32(Condition cond, AddressLiteral counter_addr);
// Unconditional atomic increment.
- void atomic_incl(AddressLiteral counter_addr);
+ void atomic_incl(Address counter_addr);
+ void atomic_incl(AddressLiteral counter_addr, Register scr = rscratch1);
+ #ifdef _LP64
+ void atomic_incq(Address counter_addr);
+ void atomic_incq(AddressLiteral counter_addr, Register scr = rscratch1);
+ #endif
+ void atomic_incptr(AddressLiteral counter_addr, Register scr = rscratch1) { LP64_ONLY(atomic_incq(counter_addr, scr)) NOT_LP64(atomic_incl(counter_addr, scr)) ; }
+ void atomic_incptr(Address counter_addr) { LP64_ONLY(atomic_incq(counter_addr)) NOT_LP64(atomic_incl(counter_addr)) ; }
void lea(Register dst, AddressLiteral adr);
void lea(Address dst, AddressLiteral adr);
void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
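
These increments back statistics such as the RTM abort counters, which many threads bump without holding a lock, so each bump must be a single atomic read-modify-write; the assembler emits a lock-prefixed incl/incq. A C++ analogue of atomic_incptr (not HotSpot code):

    #include <atomic>
    #include <cstdint>

    // A shared profiling counter bumped concurrently from many threads.
    static std::atomic<intptr_t> abort_count{0};

    void record_abort() {
      // one atomic read-modify-write, like "lock incq" on x86-64
      abort_count.fetch_add(1, std::memory_order_relaxed);
    }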
*** 1072,1082 ****
--- 1111,1125 ----
// can this do an lea?
void movptr(Register dst, ArrayAddress src);
void movptr(Register dst, Address src);
- void movptr(Register dst, AddressLiteral src);
+ #ifdef _LP64
+ void movptr(Register dst, AddressLiteral src, Register scratch=rscratch1);
+ #else
+ void movptr(Register dst, AddressLiteral src, Register scratch=noreg); // Scratch reg is ignored in 32-bit
+ #endif
void movptr(Register dst, intptr_t src);
void movptr(Register dst, Register src);
void movptr(Address dst, intptr_t src);
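
The scratch register exists because, in 64-bit mode, a literal address is directly usable only when it lies within rip-relative range of the instruction; an arbitrary 64-bit AddressLiteral can fail that test, in which case movptr must first materialize the full address in the scratch register. A sketch of the reachability test (hypothetical helper, not HotSpot's):

    #include <cstdint>

    // true if target is addressable via a signed 32-bit rip-relative
    // displacement from the end of the current instruction
    static bool fits_rip_relative(uintptr_t next_insn, uintptr_t target) {
      intptr_t disp = (intptr_t)(target - next_insn);
      return disp == (intptr_t)(int32_t)disp;
    }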