src/hotspot/cpu/x86/macroAssembler_x86.cpp

@@ -1028,11 +1028,10 @@
 void MacroAssembler::andptr(Register dst, int32_t imm32) {
   LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
 }
 
 void MacroAssembler::atomic_incl(Address counter_addr) {
-  if (os::is_MP())
-    lock();
+  lock();
   incrementl(counter_addr);
 }
 
 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {

@@ -1044,11 +1043,10 @@
   }
 }
 
 #ifdef _LP64
 void MacroAssembler::atomic_incq(Address counter_addr) {
-  if (os::is_MP())
-    lock();
+  lock();
   incrementq(counter_addr);
 }
 
 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
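
The hunks above make the lock prefix unconditional: the VM no longer asks os::is_MP() and always emits lock; incl/incq, since the prefix is what makes the increment atomic across processors. A minimal C++ sketch of the equivalent operation (illustrative names, not the HotSpot API):

#include <atomic>
#include <cstdint>

// What "lock; incl [counter]" buys: an atomic read-modify-write.
// Without the lock prefix, two CPUs could both load the same value,
// increment it, and store it back, losing one update.
void atomic_incl_sketch(std::atomic<int32_t>& counter) {
  counter.fetch_add(1);  // compiles to a locked add/xadd on x86
}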

@@ -1211,13 +1209,11 @@
   orptr(tmp_reg, r15_thread);
 #else
   get_thread(tmp_reg);
   orptr(tmp_reg, swap_reg);
 #endif
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // If the biasing toward our thread failed, this means that
   // another thread succeeded in biasing it toward itself and we
   // need to revoke that bias. The revocation will occur in the
   // interpreter runtime in the slow case.
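
A hedged sketch of the CAS this biased-locking path performs (types and names are illustrative; the real mark-word encoding lives in markOop.hpp):

#include <atomic>
#include <cstdint>

// Sketch: swing the mark word from the anonymously biased pattern
// (the cmpxchg comparand, swap_reg/RAX) to a bias toward this thread
// (tmp_reg). If the CAS fails, another thread won the race and the
// bias must be revoked in the interpreter runtime slow path.
bool try_bias_toward_self(std::atomic<uintptr_t>& mark_word,
                          uintptr_t expected_unbiased,  // swap_reg
                          uintptr_t biased_to_self) {   // tmp_reg
  return mark_word.compare_exchange_strong(expected_unbiased, biased_to_self);
}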

@@ -1246,13 +1242,11 @@
 #else
   get_thread(swap_reg);
   orptr(tmp_reg, swap_reg);
   movptr(swap_reg, saved_mark_addr);
 #endif
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // If the biasing toward our thread failed, then another thread
   // succeeded in biasing it toward itself and we need to revoke that
   // bias. The revocation will occur in the runtime in the slow case.
   if (counters != NULL) {

@@ -1276,13 +1270,11 @@
   //
   // FIXME: due to a lack of registers we currently blow away the age
   // bits in this situation. Should attempt to preserve them.
   NOT_LP64( movptr(swap_reg, saved_mark_addr); )
   load_prototype_header(tmp_reg, obj_reg);
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // Fall through to the normal CAS-based lock, because no matter what
   // the result of the above CAS, some thread must have succeeded in
   // removing the bias bit from the object's header.
   if (counters != NULL) {

@@ -1374,13 +1366,11 @@
   cmpptr(tmpReg, scrReg);
   jccb(Assembler::below, L_check_always_rtm1);
   if (method_data != NULL) {
     // set rtm_state to "no rtm" in MDO
     mov_metadata(tmpReg, method_data);
-    if (os::is_MP()) {
-      lock();
-    }
+    lock();
     orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
   }
   jmpb(L_done);
   bind(L_check_always_rtm1);
   // Reload RTMLockingCounters* address

@@ -1390,13 +1380,11 @@
   cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
   jccb(Assembler::below, L_done);
   if (method_data != NULL) {
     // set rtm_state to "always rtm" in MDO
     mov_metadata(tmpReg, method_data);
-    if (os::is_MP()) {
-      lock();
-    }
+    lock();
     orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
   }
   bind(L_done);
 }
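
Both hunks above end in a locked orl that atomically sets state bits in the MethodData. In portable C++ the same effect is a fetch_or (a sketch; the rtm_state field is modeled here as a plain atomic integer):

#include <atomic>
#include <cstdint>

// Sketch of "lock; orl [MDO + rtm_state_offset], NoRTM/UseRTM":
// set bits in a shared field without losing concurrent updates.
void set_rtm_state_bits(std::atomic<int32_t>& rtm_state, int32_t bits) {
  rtm_state.fetch_or(bits);
}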
 

@@ -1603,13 +1591,11 @@
   Register threadReg = r15_thread;
 #else
   get_thread(scrReg);
   Register threadReg = scrReg;
 #endif
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
 
   if (RTMRetryCount > 0) {
     // success done else retry
     jccb(Assembler::equal, DONE_LABEL) ;

@@ -1765,13 +1751,11 @@
   jccb(Assembler::notZero, IsInflated);
 
   // Attempt stack-locking ...
   orptr (tmpReg, markOopDesc::unlocked_value);
   movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(boxReg, Address(objReg, oopDesc::mark_offset_in_bytes()));      // Updates tmpReg
   if (counters != NULL) {
     cond_inc32(Assembler::equal,
                ExternalAddress((address)counters->fast_path_entry_count_addr()));
   }
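
The stack-locking fast path above writes the anticipated displaced header into the on-stack BasicLock first, then tries to CAS the box address into the object's mark word. A rough C++ model (field layout is illustrative; the low mark-word bit is markOopDesc::unlocked_value):

#include <atomic>
#include <cstdint>

struct BasicLockSketch { uintptr_t displaced_header; };  // on-stack box

// Sketch of the fast path: publish the unlocked mark into the box,
// then CAS the box address into the mark word. Success => locked;
// failure => fall through to the inflated/slow path.
bool try_stack_lock(std::atomic<uintptr_t>& mark_word, BasicLockSketch* box) {
  uintptr_t unlocked = mark_word.load() | 1u;  // set unlocked_value bit
  box->displaced_header = unlocked;            // anticipate successful CAS
  return mark_word.compare_exchange_strong(unlocked, (uintptr_t)box);
}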

@@ -1824,13 +1808,11 @@
   // But we don't have enough registers, so instead we can either try to CAS
   // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
   // we later store "Self" into m->Owner.  Transiently storing a stack address
   // (rsp or the address of the box) into  m->owner is harmless.
   // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
   movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
   // If we weren't able to swing _owner from NULL to the BasicLock
   // then take the slow path.
   jccb  (Assembler::notZero, DONE_LABEL);

@@ -1849,13 +1831,11 @@
 #else // _LP64
   // It's inflated
   movq(scrReg, tmpReg);
   xorq(tmpReg, tmpReg);
 
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(r15_thread, Address(scrReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
   // Unconditionally set box->_displaced_header = markOopDesc::unused_mark().
   // Without cast to int32_t movptr will destroy r10 which is typically obj.
   movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
   // Intentional fall-through into DONE_LABEL ...

@@ -1998,13 +1978,11 @@
   // It must be stack-locked.
   // Try to reset the header to displaced header.
   // The "box" value on the stack is stable, so we can reload
   // and be assured we observe the same value as above.
   movptr(tmpReg, Address(boxReg, 0));
-  if (os::is_MP()) {
-    lock();
-  }
+  lock();
   cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
   // Intentional fall-through into DONE_LABEL
 
   // DONE_LABEL is a hot target - we'd really like to place it at the
   // start of cache line by padding with NOPs.

@@ -2034,20 +2012,20 @@
   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
   jccb  (Assembler::zero, LGoSlowPath);
 
   xorptr(boxReg, boxReg);
   movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
-  if (os::is_MP()) {
-    // Memory barrier/fence
-    // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
-    // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
-    // This is faster on Nehalem and AMD Shanghai/Barcelona.
-    // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
-    // We might also restructure (ST Owner=0;barrier;LD _Succ) to
-    // (mov box,0; xchgq box, &m->Owner; LD _succ) .
-    lock(); addl(Address(rsp, 0), 0);
-  }
+
+  // Memory barrier/fence
+  // Dekker pivot point -- fulcrum : ST Owner; MEMBAR; LD Succ
+  // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
+  // This is faster on Nehalem and AMD Shanghai/Barcelona.
+  // See https://blogs.oracle.com/dave/entry/instruction_selection_for_volatile_fences
+  // We might also restructure (ST Owner=0;barrier;LD _Succ) to
+  // (mov box,0; xchgq box, &m->Owner; LD _succ) .
+  lock(); addl(Address(rsp, 0), 0);
+
   cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
   jccb  (Assembler::notZero, LSuccess);
 
   // Rare inopportune interleaving - race.
   // The successor vanished in the small window above.
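
The fence idiom retained above is worth spelling out: on x86 a dummy locked add to the top of the stack is a full barrier like MFENCE, just cheaper on the microarchitectures the comment names. The ST Owner; MEMBAR; LD Succ pivot in C++ terms (a sketch, not the ObjectMonitor code):

#include <atomic>

// Sketch of the Dekker pivot: release the lock (ST _owner = NULL),
// fence, then inspect _succ. The seq_cst fence is what the generated
// "lock; addl $0, (rsp)" provides; without it the load of _succ could
// complete before the store of _owner becomes globally visible.
void* release_then_check_succ(std::atomic<void*>& owner,
                              std::atomic<void*>& succ) {
  owner.store(nullptr, std::memory_order_relaxed);      // ST Owner
  std::atomic_thread_fence(std::memory_order_seq_cst);  // MEMBAR
  return succ.load(std::memory_order_relaxed);          // LD Succ
}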

@@ -2061,11 +2039,11 @@
  // coherence traffic on the lock *and* artificially extended the critical section
  // length by virtue of passing control into the slow path.
 
   // box is really RAX -- the following CMPXCHG depends on that binding
   // cmpxchg R,[M] is equivalent to rax = CAS(M,rax,R)
-  if (os::is_MP()) { lock(); }
+  lock();
   cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
   // There's no successor so we tried to regrab the lock.
   // If that didn't work, then another thread grabbed the
   // lock so we're done (and exit was a success).
   jccb  (Assembler::notEqual, LSuccess);
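
The comment above states the key x86 rule: lock cmpxchg R,[M] behaves as RAX = CAS(M, RAX, R), with ZF reporting whether the swap happened. A C++ model of this regrab attempt (names illustrative):

#include <atomic>
#include <cstdint>

// Sketch of "lock; cmpxchg r15_thread, [m->owner]" with RAX == 0:
// if _owner was NULL it becomes this thread (ZF=1, lock regrabbed);
// otherwise RAX receives the observed owner (ZF=0) and, as the code
// notes, some other thread took the lock, so the exit still succeeded.
bool try_regrab(std::atomic<uintptr_t>& owner, uintptr_t self) {
  uintptr_t rax = 0;  // implicit comparand; boxReg/RAX is zero here
  return owner.compare_exchange_strong(rax, self);
}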

@@ -2079,11 +2057,11 @@
   testl (boxReg, 0);                      // set ICC.ZF=1 to indicate success
   jmpb  (DONE_LABEL);
 
   bind  (Stacked);
   movptr(tmpReg, Address (boxReg, 0));      // re-fetch
-  if (os::is_MP()) { lock(); }
+  lock();
   cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
 
 #endif
   bind(DONE_LABEL);
 }

@@ -2631,16 +2609,14 @@
 }
 #endif
 
 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
   if (reachable(adr)) {
-    if (os::is_MP())
-      lock();
+    lock();
     cmpxchgptr(reg, as_Address(adr));
   } else {
     lea(rscratch1, adr);
-    if (os::is_MP())
-      lock();
+    lock();
     cmpxchgptr(reg, Address(rscratch1, 0));
   }
 }
 