
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp


*** 1905,1917 ****
      assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
      assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
      assert(op->new_value()->as_register_lo() == rbx, "wrong register");
      assert(op->new_value()->as_register_hi() == rcx, "wrong register");
      Register addr = op->addr()->as_register();
-     if (os::is_MP()) {
        __ lock();
-     }
      NOT_LP64(__ cmpxchg8(Address(addr, 0)));

    } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
      NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
      Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
--- 1905,1915 ----
*** 1927,1954 ****
  #ifdef _LP64
        if (UseCompressedOops) {
          __ encode_heap_oop(cmpval);
          __ mov(rscratch1, newval);
          __ encode_heap_oop(rscratch1);
-         if (os::is_MP()) {
            __ lock();
-         }
          // cmpval (rax) is implicitly used by this instruction
          __ cmpxchgl(rscratch1, Address(addr, 0));
        } else
  #endif
        {
-         if (os::is_MP()) {
            __ lock();
-         }
          __ cmpxchgptr(newval, Address(addr, 0));
        }
      } else {
        assert(op->code() == lir_cas_int, "lir_cas_int expected");
-       if (os::is_MP()) {
          __ lock();
-       }
        __ cmpxchgl(newval, Address(addr, 0));
      }
  #ifdef _LP64
    } else if (op->code() == lir_cas_long) {
      Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
--- 1925,1946 ----
*** 1957,1969 ****
      assert(cmpval == rax, "wrong register");
      assert(newval != NULL, "new val must be register");
      assert(cmpval != newval, "cmp and new values must be in different registers");
      assert(cmpval != addr, "cmp and addr must be in different registers");
      assert(newval != addr, "new value and addr must be in different registers");
-     if (os::is_MP()) {
        __ lock();
-     }
      __ cmpxchgq(newval, Address(addr, 0));
  #endif // _LP64
    } else {
      Unimplemented();
    }
--- 1949,1959 ----
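
The compare-and-swap paths above all funnel into a lock-prefixed cmpxchg against rax; only the os::is_MP() test around the lock prefix is being dropped. As a side note for readers who do not read the assembler daily, here is a minimal standalone sketch (plain C++, not HotSpot code) of the semantics that lock cmpxchg provides: an atomic compare-and-swap that, on failure, hands back the current memory value -- which is also why cmp_value is pinned to rax in the asserts, since cmpxchg uses rax implicitly.

// Illustration only -- not part of the patch.
#include <atomic>
#include <cstdio>

int main() {
  std::atomic<long> field{42};

  long expected = 42;   // plays the role of cmp_value (rax)
  long newval   = 99;   // plays the role of new_value

  // Succeeds: field == expected, so field becomes newval.
  bool ok = field.compare_exchange_strong(expected, newval);
  std::printf("first CAS:  ok=%d field=%ld expected=%ld\n", ok, field.load(), expected);

  // Fails: field is now 99, not 42.  `expected` is overwritten with the
  // current value, mirroring cmpxchg writing the memory value back into rax.
  expected = 42;
  ok = field.compare_exchange_strong(expected, 7);
  std::printf("second CAS: ok=%d field=%ld expected=%ld\n", ok, field.load(), expected);
  return 0;
}
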
*** 2794,2804 ****
    }
  }


  void LIR_Assembler::align_call(LIR_Code code) {
-   if (os::is_MP()) {
      // make sure that the displacement word of the call ends up word aligned
      int offset = __ offset();
      switch (code) {
        case lir_static_call:
        case lir_optvirtual_call:
--- 2784,2793 ----
*** 2810,2836 ****
        break;
        case lir_virtual_call:  // currently, sparc-specific for niagara
        default: ShouldNotReachHere();
      }
      __ align(BytesPerWord, offset);
-   }
  }


  void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
!   assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
!          "must be aligned");
    __ call(AddressLiteral(op->addr(), rtype));
    add_call_info(code_offset(), op->info());
  }


  void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
    __ ic_call(op->addr());
    add_call_info(code_offset(), op->info());
!   assert(!os::is_MP() ||
!          (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
!          "must be aligned");
  }


  /* Currently, vtable-dispatch is only enabled for sparc platforms */
--- 2799,2823 ----
        break;
        case lir_virtual_call:  // currently, sparc-specific for niagara
        default: ShouldNotReachHere();
      }
      __ align(BytesPerWord, offset);
  }


  void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
!   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
!          "must be aligned");
    __ call(AddressLiteral(op->addr(), rtype));
    add_call_info(code_offset(), op->info());
  }


  void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
    __ ic_call(op->addr());
    add_call_info(code_offset(), op->info());
!   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
!          "must be aligned");
  }


  /* Currently, vtable-dispatch is only enabled for sparc platforms */
*** 2846,2863 ****
      bailout("static call stub overflow");
      return;
    }

    int start = __ offset();
!   if (os::is_MP()) {
      // make sure that the displacement word of the call ends up word aligned
      __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
-   }
    __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
    __ mov_metadata(rbx, (Metadata*)NULL);
    // must be set to -1 at code generation time
!   assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
    // On 64bit this will die since it will take a movq & jmp, must be only a jmp
    __ jump(RuntimeAddress(__ pc()));

    if (UseAOT) {
      // Trampoline to aot code
--- 2833,2849 ----
      bailout("static call stub overflow");
      return;
    }

    int start = __ offset();
+ 
!   // make sure that the displacement word of the call ends up word aligned
      __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
    __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
    __ mov_metadata(rbx, (Metadata*)NULL);
    // must be set to -1 at code generation time
!   assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
    // On 64bit this will die since it will take a movq & jmp, must be only a jmp
    __ jump(RuntimeAddress(__ pc()));

    if (UseAOT) {
      // Trampoline to aot code
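
The call-site hunks above enforce one invariant: the 4-byte displacement of the emitted call must start on a word boundary so that it can later be patched atomically; once the VM assumes it is always running on MP hardware, the os::is_MP() guard around the padding and the !os::is_MP() escape in the asserts become dead and are dropped. Below is a standalone sketch of the padding arithmetic behind __ align(BytesPerWord, target) as used here; the constant values are illustrative assumptions, not taken from the patch.

// Illustration only -- not part of the patch.
#include <cstdio>

static const int BytesPerWord        = 8;  // assumed 64-bit word size
static const int displacement_offset = 1;  // assumed: displacement follows a 1-byte call opcode

// Number of padding bytes (nops) needed before emitting the call at `offset`
// so that the displacement word lands on a word boundary.
static int call_alignment_padding(int offset) {
  int target = offset + displacement_offset;   // where the displacement would start
  int mis = target % BytesPerWord;
  return mis == 0 ? 0 : BytesPerWord - mis;
}

int main() {
  for (int offset = 0; offset < 10; offset++) {
    int pad = call_alignment_padding(offset);
    // After padding, (offset + pad + displacement_offset) % BytesPerWord == 0,
    // which is the condition the assert in LIR_Assembler::call() checks.
    std::printf("offset=%2d pad=%d displacement starts at %2d\n",
                offset, pad, offset + pad + displacement_offset);
  }
  return 0;
}
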
*** 3970,3982 ****
  void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
    assert(data == dest, "xchg/xadd uses only 2 operands");

    if (data->type() == T_INT) {
      if (code == lir_xadd) {
-       if (os::is_MP()) {
          __ lock();
-       }
        __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
      } else {
        __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
      }
    } else if (data->is_oop()) {
--- 3956,3966 ----
*** 3995,4007 ****
  #endif
    } else if (data->type() == T_LONG) {
  #ifdef _LP64
      assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
      if (code == lir_xadd) {
-       if (os::is_MP()) {
          __ lock();
-       }
        __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
      } else {
        __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
      }
  #else
--- 3979,3989 ----
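
Same pattern in atomic_op(): the explicit lock prefix in front of xadd is now emitted unconditionally, while the xchg paths never carried one because an xchg with a memory operand is implicitly locked on x86. As a rough standalone analogy (plain C++, not HotSpot code), the two instructions correspond to std::atomic fetch-and-add and exchange:

// Illustration only -- not part of the patch.
#include <atomic>
#include <cstdio>

int main() {
  std::atomic<long> counter{10};

  long before_add = counter.fetch_add(5);    // lock xadd: returns the previous value
  std::printf("fetch_add: before=%ld after=%ld\n", before_add, counter.load());

  long before_swap = counter.exchange(100);  // xchg: returns the previous value
  std::printf("exchange:  before=%ld after=%ld\n", before_swap, counter.load());
  return 0;
}
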