src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

@@ -2462,15 +2462,12 @@
     __ orptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 
     // Save (object->mark() | 1) into BasicLock's displaced header
     __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
 
-    if (os::is_MP()) {
-      __ lock();
-    }
-
     // src -> dest iff dest == rax else rax <- dest
+    __ lock();
     __ cmpxchgptr(lock_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     __ jcc(Assembler::equal, lock_done);
 
     // Hmm should this move to the slow path code area???
 

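The guarded sequence above is the stack-locking fast path: store (mark | 1)
into the BasicLock as the displaced header, then lock-cmpxchg the BasicLock's
address into the object's mark word. With the os::is_MP() check gone, the lock
prefix is simply emitted unconditionally. A minimal C++ sketch of the same
compare-and-swap protocol, using std::atomic rather than emitted assembly
(ObjHeader and BasicLock here are illustrative stand-ins, not the HotSpot types):

    #include <atomic>
    #include <cstdint>

    struct BasicLock { uintptr_t displaced_header; };      // stand-in
    struct ObjHeader { std::atomic<uintptr_t> mark; };     // stand-in

    // Fast-path lock: save the unlocked mark (mark | 1) as the displaced
    // header, then CAS the lock's address into the mark word. Succeeds
    // only if no other thread locked the object in the meantime.
    bool try_fast_lock(ObjHeader* obj, BasicLock* lock) {
      uintptr_t unlocked = obj->mark.load(std::memory_order_relaxed) | 1;
      lock->displaced_header = unlocked;
      return obj->mark.compare_exchange_strong(
          unlocked, reinterpret_cast<uintptr_t>(lock),
          std::memory_order_acq_rel, std::memory_order_relaxed);
    }

On failure the code falls through toward the slow path, just as the
jcc(Assembler::equal, lock_done) above only skips it on success.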
@@ -2556,11 +2553,10 @@
   //     VM thread changes sync state to synchronizing and suspends threads for GC.
   //     Thread A is resumed to finish this native method, but doesn't block here since it
   //     didn't see any synchronization in progress, and escapes.
   __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
 
-  if(os::is_MP()) {
     if (UseMembar) {
       // Force this write out before the read below
       __ membar(Assembler::Membar_mask_bits(
            Assembler::LoadLoad | Assembler::LoadStore |
            Assembler::StoreLoad | Assembler::StoreStore));

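The point of this barrier: the store of _thread_in_native_trans must become
visible before the subsequent read of the safepoint state, and StoreLoad is
the one ordering x86 does not provide for free. A minimal sketch of the same
requirement in portable C++ (thread_state and safepoint_state are illustrative
globals, not HotSpot's fields):

    #include <atomic>

    std::atomic<int> thread_state{0};      // stand-in for the thread-state field
    std::atomic<int> safepoint_state{0};   // stand-in for the safepoint flag

    // Publish the state transition, then force a full (StoreLoad-including)
    // fence before reading the safepoint flag, so the VM thread cannot
    // observe the old state after this thread has read "no safepoint".
    int transition_then_check() {
      thread_state.store(1, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // UseMembar path
      return safepoint_state.load(std::memory_order_relaxed);
    }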
@@ -2569,11 +2565,10 @@
       // We use the current thread pointer to calculate a thread specific
       // offset to write to within the page. This minimizes bus traffic
       // due to cache line collision.
       __ serialize_memory(r15_thread, rcx);
     }
-  }
 
   Label after_transition;
 
   // check for safepoint operation in progress and/or pending suspend requests
   {

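The else branch trades the per-transition fence for a store into a global
serialization page; the VM thread later protects that page, which forces all
pending stores to be serialized. A simplified sketch of the thread-side write
(serialize_page and the shift/mask values are assumptions for illustration,
not HotSpot's actual constants):

    #include <cstdint>

    static volatile int* serialize_page;               // mapped by the VM
    static const uintptr_t page_mask = 4096 - 1;       // assumed page size

    // Derive a thread-specific, int-aligned offset from the thread pointer
    // so concurrent writers land on different cache lines, then store into
    // the serialization page (the job of the rcx scratch register above).
    void write_serialize_page(const void* thread) {
      uintptr_t offset = (reinterpret_cast<uintptr_t>(thread) >> 6)
                         & page_mask & ~uintptr_t(sizeof(int) - 1);
      *reinterpret_cast<volatile int*>(
          reinterpret_cast<uintptr_t>(serialize_page) + offset) = 1;
    }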
@@ -2659,13 +2654,11 @@
     __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
     //  get old displaced header
     __ movptr(old_hdr, Address(rax, 0));
 
     // Atomic swap old header if oop still contains the stack lock
-    if (os::is_MP()) {
       __ lock();
-    }
     __ cmpxchgptr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     __ jcc(Assembler::notEqual, slow_path_unlock);
 
     // slow path re-enters here
     __ bind(unlock_done);
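This is the unlock mirror of the fast path: the CAS succeeds only if the mark
word still holds this stack lock's address; if the monitor was inflated in the
meantime it fails and control transfers to slow_path_unlock. The same protocol
as a hedged C++ sketch (ObjHeader and BasicLock as in the locking sketch above,
stand-ins rather than the real HotSpot types):

    #include <atomic>
    #include <cstdint>

    struct BasicLock { uintptr_t displaced_header; };   // stand-in
    struct ObjHeader { std::atomic<uintptr_t> mark; };  // stand-in

    // Fast-path unlock: restore the displaced header, but only if the
    // mark word still points at this stack lock.
    bool try_fast_unlock(ObjHeader* obj, BasicLock* lock) {
      uintptr_t expected = reinterpret_cast<uintptr_t>(lock);
      return obj->mark.compare_exchange_strong(
          expected, lock->displaced_header,
          std::memory_order_acq_rel, std::memory_order_relaxed);
    }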