< prev index next >

src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp

Print this page
rev 13452 : [mq]: coleen_review1
rev 13453 : [mq]: dholmes_review1


 295       strasm_sync
 296       );
 297   }
 298 }
 299 
 300 inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
 301   if (order != memory_order_relaxed) {
 302     __asm__ __volatile__ (
 303       /* fence */
 304       strasm_sync
 305       );
 306   }
 307 }
 308 
 309 template<>
 310 template<typename T>
 311 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
 312                                                 T volatile* dest,
 313                                                 T compare_value,
 314                                                 cmpxchg_memory_order order) const {


 315   // Note that cmpxchg guarantees a two-way memory barrier across
 316   // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
 317   // specified otherwise (see atomic.hpp).
 318 
 319   // Using 32 bit internally.
 320   volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
 321 
 322 #ifdef VM_LITTLE_ENDIAN
 323   const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
 324 #else
 325   const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
 326 #endif
 327   const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
 328                      masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
 329                      xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
 330 
 331   unsigned int old_value, value32;
 332 
 333   cmpxchg_pre_membar(order);
 334 


 363       [masked_compare_val]  "r"     (masked_compare_val),
 364       [xor_value]           "r"     (xor_value),
 365                             "m"     (*dest),
 366                             "m"     (*dest_base)
 367     /* clobber */
 368     : "cc",
 369       "memory"
 370     );
 371 
 372   cmpxchg_post_membar(order);
 373 
 374   return IntegerTypes::cast<T>((unsigned char)old_value);
 375 }
 376 
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  // Atomically replace the 4-byte value at *dest with exchange_value iff it
  // currently equals compare_value; returns the value observed at *dest
  // either way.
  //
  // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
  // specified otherwise (see atomic.hpp).

  unsigned int old_value;
  const uint64_t zero = 0;  // zero index register for the lwarx/stwcx. indexed forms

  cmpxchg_pre_membar(order);  // leading fence (elided for memory_order_relaxed)

  __asm__ __volatile__ (
    /* simple guard: one plain load-and-compare first, so a value that
       already differs bails out to 2: without taking a reservation */
    "   lwz     %[old_value], 0(%[dest])                \n"
    "   cmpw    %[compare_value], %[old_value]          \n"
    "   bne-    2f                                      \n"
    /* atomic loop: load-reserved / store-conditional; retries at 1: while
       stwcx. fails to keep the reservation */
    "1:                                                 \n"
    "   lwarx   %[old_value], %[dest], %[zero]          \n"
    "   cmpw    %[compare_value], %[old_value]          \n"
    "   bne-    2f                                      \n"
    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
    "   bne-    1b                                      \n"
    /* exit */
    "2:                                                 \n"
    /* out: old_value is early-clobber ("&") because it is written before
       all inputs have been consumed */
    : [old_value]       "=&r"   (old_value),
                        "=m"    (*dest)
    /* in: "b" keeps dest out of r0, which reads as literal 0 in the base
       position of lwz -- NOTE(review): standard PPC constraint, confirm */
    : [dest]            "b"     (dest),
      [zero]            "r"     (zero),
      [compare_value]   "r"     (compare_value),
      [exchange_value]  "r"     (exchange_value),
                        "m"     (*dest)
    /* clobber: condition register + compiler-level memory barrier */
    : "cc",
      "memory"
    );

  cmpxchg_post_membar(order);  // trailing fence (elided for memory_order_relaxed)

  return IntegerTypes::cast<T>(old_value);
}
 424 
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  // Atomically replace the 8-byte value at *dest with exchange_value iff it
  // currently equals compare_value; returns the value observed at *dest
  // either way.
  //
  // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
  // specified otherwise (see atomic.hpp).

  // NOTE(review): relies on LP64 (64-bit long) on linux-ppc64 -- confirm.
  long old_value;
  const uint64_t zero = 0;  // zero index register for the ldarx/stdcx. indexed forms

  cmpxchg_pre_membar(order);  // leading fence (elided for memory_order_relaxed)

  __asm__ __volatile__ (
    /* simple guard: one plain load-and-compare first, so a value that
       already differs bails out to 2: without taking a reservation */
    "   ld      %[old_value], 0(%[dest])                \n"
    "   cmpd    %[compare_value], %[old_value]          \n"
    "   bne-    2f                                      \n"
    /* atomic loop: load-reserved / store-conditional; retries at 1: while
       stdcx. fails to keep the reservation */
    "1:                                                 \n"
    "   ldarx   %[old_value], %[dest], %[zero]          \n"
    "   cmpd    %[compare_value], %[old_value]          \n"
    "   bne-    2f                                      \n"
    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
    "   bne-    1b                                      \n"
    /* exit */
    "2:                                                 \n"
    /* out: old_value is early-clobber ("&") because it is written before
       all inputs have been consumed */
    : [old_value]       "=&r"   (old_value),
                        "=m"    (*dest)
    /* in: "b" keeps dest out of r0, which reads as literal 0 in the base
       position of ld -- NOTE(review): standard PPC constraint, confirm */
    : [dest]            "b"     (dest),
      [zero]            "r"     (zero),
      [compare_value]   "r"     (compare_value),
      [exchange_value]  "r"     (exchange_value),
                        "m"     (*dest)
    /* clobber: condition register + compiler-level memory barrier */
    : "cc",
      "memory"
    );

  cmpxchg_post_membar(order);  // trailing fence (elided for memory_order_relaxed)

  return IntegerTypes::cast<T>(old_value);
}
 472 
 473 #undef strasm_sync
 474 #undef strasm_lwsync
 475 #undef strasm_isync
 476 #undef strasm_release
 477 #undef strasm_acquire
 478 #undef strasm_fence
 479 #undef strasm_nobarrier
 480 #undef strasm_nobarrier_clobber_memory
 481 
 482 #endif // OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_HPP


 295       strasm_sync
 296       );
 297   }
 298 }
 299 
 300 inline void cmpxchg_post_membar(cmpxchg_memory_order order) {
 301   if (order != memory_order_relaxed) {
 302     __asm__ __volatile__ (
 303       /* fence */
 304       strasm_sync
 305       );
 306   }
 307 }
 308 
 309 template<>
 310 template<typename T>
 311 inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
 312                                                 T volatile* dest,
 313                                                 T compare_value,
 314                                                 cmpxchg_memory_order order) const {
 315   STATIC_ASSERT(1 == sizeof(T));
 316 
 317   // Note that cmpxchg guarantees a two-way memory barrier across
 318   // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
 319   // specified otherwise (see atomic.hpp).
 320 
 321   // Using 32 bit internally.
 322   volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3);
 323 
 324 #ifdef VM_LITTLE_ENDIAN
 325   const unsigned int shift_amount        = ((uintptr_t)dest & 3) * 8;
 326 #else
 327   const unsigned int shift_amount        = ((~(uintptr_t)dest) & 3) * 8;
 328 #endif
 329   const unsigned int masked_compare_val  = ((unsigned int)(unsigned char)compare_value),
 330                      masked_exchange_val = ((unsigned int)(unsigned char)exchange_value),
 331                      xor_value           = (masked_compare_val ^ masked_exchange_val) << shift_amount;
 332 
 333   unsigned int old_value, value32;
 334 
 335   cmpxchg_pre_membar(order);
 336 


 365       [masked_compare_val]  "r"     (masked_compare_val),
 366       [xor_value]           "r"     (xor_value),
 367                             "m"     (*dest),
 368                             "m"     (*dest_base)
 369     /* clobber */
 370     : "cc",
 371       "memory"
 372     );
 373 
 374   cmpxchg_post_membar(order);
 375 
 376   return IntegerTypes::cast<T>((unsigned char)old_value);
 377 }
 378 
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));  // this specialization handles 4-byte types only

  // Atomically replace the 4-byte value at *dest with exchange_value iff it
  // currently equals compare_value; returns the value observed at *dest
  // either way.
  //
  // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
  // specified otherwise (see atomic.hpp).

  T old_value;
  const uint64_t zero = 0;  // zero index register for the lwarx/stwcx. indexed forms

  cmpxchg_pre_membar(order);  // leading fence (elided for memory_order_relaxed)

  __asm__ __volatile__ (
    /* simple guard: one plain load-and-compare first, so a value that
       already differs bails out to 2: without taking a reservation */
    "   lwz     %[old_value], 0(%[dest])                \n"
    "   cmpw    %[compare_value], %[old_value]          \n"
    "   bne-    2f                                      \n"
    /* atomic loop: load-reserved / store-conditional; retries at 1: while
       stwcx. fails to keep the reservation */
    "1:                                                 \n"
    "   lwarx   %[old_value], %[dest], %[zero]          \n"
    "   cmpw    %[compare_value], %[old_value]          \n"
    "   bne-    2f                                      \n"
    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
    "   bne-    1b                                      \n"
    /* exit */
    "2:                                                 \n"
    /* out: old_value is early-clobber ("&") because it is written before
       all inputs have been consumed */
    : [old_value]       "=&r"   (old_value),
                        "=m"    (*dest)
    /* in: "b" keeps dest out of r0, which reads as literal 0 in the base
       position of lwz -- NOTE(review): standard PPC constraint, confirm */
    : [dest]            "b"     (dest),
      [zero]            "r"     (zero),
      [compare_value]   "r"     (compare_value),
      [exchange_value]  "r"     (exchange_value),
                        "m"     (*dest)
    /* clobber: condition register + compiler-level memory barrier */
    : "cc",
      "memory"
    );

  cmpxchg_post_membar(order);  // trailing fence (elided for memory_order_relaxed)

  return old_value;
}
 428 
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));  // this specialization handles 8-byte types only

  // Atomically replace the 8-byte value at *dest with exchange_value iff it
  // currently equals compare_value; returns the value observed at *dest
  // either way.
  //
  // Note that cmpxchg guarantees a two-way memory barrier across
  // the cmpxchg, so it's really a 'fence_cmpxchg_fence' if not
  // specified otherwise (see atomic.hpp).

  T old_value;
  const uint64_t zero = 0;  // zero index register for the ldarx/stdcx. indexed forms

  cmpxchg_pre_membar(order);  // leading fence (elided for memory_order_relaxed)

  __asm__ __volatile__ (
    /* simple guard: one plain load-and-compare first, so a value that
       already differs bails out to 2: without taking a reservation */
    "   ld      %[old_value], 0(%[dest])                \n"
    "   cmpd    %[compare_value], %[old_value]          \n"
    "   bne-    2f                                      \n"
    /* atomic loop: load-reserved / store-conditional; retries at 1: while
       stdcx. fails to keep the reservation */
    "1:                                                 \n"
    "   ldarx   %[old_value], %[dest], %[zero]          \n"
    "   cmpd    %[compare_value], %[old_value]          \n"
    "   bne-    2f                                      \n"
    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
    "   bne-    1b                                      \n"
    /* exit */
    "2:                                                 \n"
    /* out: old_value is early-clobber ("&") because it is written before
       all inputs have been consumed */
    : [old_value]       "=&r"   (old_value),
                        "=m"    (*dest)
    /* in: "b" keeps dest out of r0, which reads as literal 0 in the base
       position of ld -- NOTE(review): standard PPC constraint, confirm */
    : [dest]            "b"     (dest),
      [zero]            "r"     (zero),
      [compare_value]   "r"     (compare_value),
      [exchange_value]  "r"     (exchange_value),
                        "m"     (*dest)
    /* clobber: condition register + compiler-level memory barrier */
    : "cc",
      "memory"
    );

  cmpxchg_post_membar(order);  // trailing fence (elided for memory_order_relaxed)

  return old_value;
}
 478 
 479 #undef strasm_sync
 480 #undef strasm_lwsync
 481 #undef strasm_isync
 482 #undef strasm_release
 483 #undef strasm_acquire
 484 #undef strasm_fence
 485 #undef strasm_nobarrier
 486 #undef strasm_nobarrier_clobber_memory
 487 
 488 #endif // OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_HPP
< prev index next >