
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

@@ -1905,13 +1905,11 @@
     assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
     assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
     assert(op->new_value()->as_register_lo() == rbx, "wrong register");
     assert(op->new_value()->as_register_hi() == rcx, "wrong register");
     Register addr = op->addr()->as_register();
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     NOT_LP64(__ cmpxchg8(Address(addr, 0)));
 
   } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
     NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());

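Context for the removal above: os::is_MP() used to gate the lock prefix so uniprocessor builds could skip it; with uniprocessor support dropped, the prefix is emitted unconditionally. As a standalone illustration (not HotSpot code), the equivalent 64-bit CAS in portable C++ is lowered by 32-bit x86 compilers to LOCK CMPXCHG8B, which compares EDX:EAX against the memory operand and on match stores ECX:EBX; that is exactly the register pinning asserted above.

#include <atomic>

// Standalone sketch, not HotSpot code: a 64-bit compare-and-swap. On 32-bit
// x86 this compiles to LOCK CMPXCHG8B (comparand in EDX:EAX, replacement in
// ECX:EBX), matching the rdx:rax / rcx:rbx asserts in the patch above.
bool cas64(std::atomic<long long>& cell, long long expected, long long desired) {
  return cell.compare_exchange_strong(expected, desired);
}
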
@@ -1927,28 +1925,22 @@
 #ifdef _LP64
       if (UseCompressedOops) {
         __ encode_heap_oop(cmpval);
         __ mov(rscratch1, newval);
         __ encode_heap_oop(rscratch1);
-        if (os::is_MP()) {
-          __ lock();
-        }
+        __ lock();
         // cmpval (rax) is implicitly used by this instruction
         __ cmpxchgl(rscratch1, Address(addr, 0));
       } else
 #endif
       {
-        if (os::is_MP()) {
-          __ lock();
-        }
+        __ lock();
         __ cmpxchgptr(newval, Address(addr, 0));
       }
     } else {
       assert(op->code() == lir_cas_int, "lir_cas_int expected");
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ cmpxchgl(newval, Address(addr, 0));
     }
 #ifdef _LP64
   } else if (op->code() == lir_cas_long) {
     Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());

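For the UseCompressedOops path above: both the comparand and the new value are narrowed to 32 bits before the 32-bit cmpxchgl, since the field itself holds a compressed oop. A minimal sketch of the narrow-oop encoding follows, with hypothetical base and shift values; the real encode_heap_oop also handles null and the zero-based modes.

#include <cstdint>

// Hypothetical parameters for illustration only; the JVM derives the actual
// base and shift from the heap size and placement.
static uintptr_t heap_base = 0;   // narrow-oop base address (assumed)
static const int oop_shift = 3;   // log2 of 8-byte object alignment (assumed)

uint32_t encode_oop(void* p) {    // full oop -> 32-bit compressed oop
  return (uint32_t)(((uintptr_t)p - heap_base) >> oop_shift);
}
void* decode_oop(uint32_t n) {    // compressed oop -> full oop
  return (void*)(heap_base + ((uintptr_t)n << oop_shift));
}
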
@@ -1957,13 +1949,11 @@
     assert(cmpval == rax, "wrong register");
     assert(newval != NULL, "new val must be register");
     assert(cmpval != newval, "cmp and new values must be in different registers");
     assert(cmpval != addr, "cmp and addr must be in different registers");
     assert(newval != addr, "new value and addr must be in different registers");
-    if (os::is_MP()) {
-      __ lock();
-    }
+    __ lock();
     __ cmpxchgq(newval, Address(addr, 0));
 #endif // _LP64
   } else {
     Unimplemented();
   }

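The register constraints asserted above follow from CMPXCHG itself: RAX is the implicit comparand and, on failure, receives the old value. A C++ model of the instruction's semantics (a sketch; the hardware performs this atomically under the lock prefix):

#include <cstdint>

// Model of LOCK CMPXCHG [addr], newval -- not HotSpot code. 'rax' is both
// the expected value going in and the observed value coming out on failure.
bool cmpxchg_model(intptr_t* addr, intptr_t& rax, intptr_t newval) {
  if (*addr == rax) {   // compare against the implicit comparand
    *addr = newval;     // matched: store the new value (ZF = 1)
    return true;
  }
  rax = *addr;          // mismatched: rax receives the current value (ZF = 0)
  return false;
}
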
@@ -2794,11 +2784,10 @@
   }
 }
 
 
 void LIR_Assembler::align_call(LIR_Code code) {
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    int offset = __ offset();
-    switch (code) {
-      case lir_static_call:
-      case lir_optvirtual_call:
+  // make sure that the displacement word of the call ends up word aligned
+  int offset = __ offset();
+  switch (code) {
+    case lir_static_call:
+    case lir_optvirtual_call:

@@ -2810,27 +2799,25 @@
-      break;
-      case lir_virtual_call:  // currently, sparc-specific for niagara
-      default: ShouldNotReachHere();
-    }
-    __ align(BytesPerWord, offset);
-  }
+    break;
+    case lir_virtual_call:  // currently, sparc-specific for niagara
+    default: ShouldNotReachHere();
+  }
+  __ align(BytesPerWord, offset);
 }
 
 
 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
-  assert(!os::is_MP() || (__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
+  assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
   __ call(AddressLiteral(op->addr(), rtype));
   add_call_info(code_offset(), op->info());
 }
 
 
 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
   __ ic_call(op->addr());
   add_call_info(code_offset(), op->info());
-  assert(!os::is_MP() ||
-         (__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
+  assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
          "must be aligned");
 }
 
 
 /* Currently, vtable-dispatch is only enabled for sparc platforms */

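The alignment asserted in call() and ic_call() above exists so the 4-byte call displacement can later be patched with a single atomic store while other threads may be executing the call site. A worked example of the arithmetic, assuming the usual x86-64 values (NativeCall::displacement_offset == 1 for the single 0xE8 opcode byte, BytesPerWord == 8):

#include <cassert>

int main() {
  const int displacement_offset = 1;  // one 0xE8 opcode byte (assumed)
  const int bytes_per_word = 8;       // LP64 word size (assumed)
  int offset = 13;                    // hypothetical current code offset
  // __ align(BytesPerWord, target) pads with nops until 'target' is aligned.
  int target = offset + displacement_offset;
  int pad = (bytes_per_word - target % bytes_per_word) % bytes_per_word;
  offset += pad;                      // the call now starts at offset 15
  // The invariant checked by the assert in LIR_Assembler::call():
  assert((offset + displacement_offset) % bytes_per_word == 0);
  return 0;                           // displacement spans 16..19, one word
}
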
@@ -2846,18 +2833,17 @@
     bailout("static call stub overflow");
     return;
   }
 
   int start = __ offset();
-  if (os::is_MP()) {
-    // make sure that the displacement word of the call ends up word aligned
-    __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
-  }
+
+  // make sure that the displacement word of the call ends up word aligned
+  __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
   __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
   __ mov_metadata(rbx, (Metadata*)NULL);
   // must be set to -1 at code generation time
-  assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
+  assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
   __ jump(RuntimeAddress(__ pc()));
 
   if (UseAOT) {
     // Trampoline to aot code

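For the stub above: the alignment target accounts for the metadata move that is emitted first, so that the jump's displacement word lands word aligned, which the later assert re-checks as (__ offset() + 1) % BytesPerWord == 0. A byte-level sketch of the stub, assuming the usual x86-64 encodings (illustrative, not authoritative):

// Sketch of the emitted stub (assumed encodings):
//   48 BB <imm64>   movabs rbx, <Metadata*>   ; NativeMovConstReg, 10 bytes
//   E9 <disp32>     jmp    <resolved entry>   ; patched when the call resolves
// align() pads so the E9 byte sits one byte before a word boundary, putting
// the 4-byte displacement on the boundary where a single atomic store can
// retarget the stub.
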
@@ -3970,13 +3956,11 @@
 void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
   assert(data == dest, "xchg/xadd uses only 2 operands");
 
   if (data->type() == T_INT) {
     if (code == lir_xadd) {
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ xaddl(as_Address(src->as_address_ptr()), data->as_register());
     } else {
       __ xchgl(data->as_register(), as_Address(src->as_address_ptr()));
     }
   } else if (data->is_oop()) {

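Note the asymmetry above: only the xadd path emits a lock prefix, because XCHG with a memory operand is implicitly locked on x86. As a standalone illustration (not HotSpot code), the same two operations in portable C++, which an x86-64 compiler lowers to exactly these instructions:

#include <atomic>

// lir_xadd equivalent: compiles to LOCK XADD on x86-64.
int fetch_then_add(std::atomic<int>& cell, int v) { return cell.fetch_add(v); }

// lir_xchg equivalent: compiles to XCHG, which needs no explicit lock prefix.
int swap_value(std::atomic<int>& cell, int v) { return cell.exchange(v); }
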
@@ -3995,13 +3979,11 @@
 #endif
   } else if (data->type() == T_LONG) {
 #ifdef _LP64
     assert(data->as_register_lo() == data->as_register_hi(), "should be a single register");
     if (code == lir_xadd) {
-      if (os::is_MP()) {
-        __ lock();
-      }
+      __ lock();
       __ xaddq(as_Address(src->as_address_ptr()), data->as_register_lo());
     } else {
       __ xchgq(data->as_register_lo(), as_Address(src->as_address_ptr()));
     }
 #else