/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_ATOMIC_HPP
#define SHARE_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum atomic_memory_order {
  // The modes that align with C++11 are intended to
  // follow the same semantics.
  memory_order_relaxed = 0,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  // Strong two-way memory barrier.
  memory_order_conservative = 8
};

enum ScopedFenceType {
    X_ACQUIRE
  , RELEASE_X
  , RELEASE_X_FENCE
};

class Atomic : AllStatic {
public:
  // Atomic operations on int64 types are not available on all 32-bit
  // platforms. If atomic ops on int64 are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

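  // For illustration, typical use of the public API on an integral
  // location. A minimal sketch; the counter name is hypothetical and not
  // declared anywhere in the VM:
  //
  //   static volatile int _hits = 0;
  //
  //   void record_hit() {
  //     Atomic::inc(&_hits);                  // RMW op: full two-way barrier
  //     int snapshot = Atomic::load(&_hits);  // relaxed atomic read
  //   }
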
  // Atomically store to a location
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static void store(T store_value, volatile D* dest);

  template <typename T, typename D>
  inline static void release_store(volatile D* dest, T store_value);

  template <typename T, typename D>
  inline static void release_store_fence(volatile D* dest, T store_value);

  // Atomically load from a location
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);

  template <typename T>
  inline static T load_acquire(const volatile T* dest);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest,
                      atomic_memory_order order = memory_order_conservative);

  template<typename I, typename D>
  inline static D sub(I sub_value, D volatile* dest,
                      atomic_memory_order order = memory_order_conservative);

  // Atomically increment a location. inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Atomically decrement a location. dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type. If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest,
                         atomic_memory_order order = memory_order_conservative);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static D xchg(T exchange_value, volatile D* dest,
                       atomic_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns the
  // prior value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename T, typename D, typename U>
  inline static D cmpxchg(T exchange_value,
                          D volatile* dest,
                          U compare_value,
                          atomic_memory_order order = memory_order_conservative);

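  // For illustration, the canonical retry loop built on cmpxchg (names
  // hypothetical): keep attempting the exchange until the observed value
  // matches the value the update was computed from.
  //
  //   volatile int _value = 0;
  //
  //   int increment_with_cas() {
  //     int old = Atomic::load(&_value);
  //     for (;;) {
  //       int observed = Atomic::cmpxchg(old + 1, &_value, old);
  //       if (observed == old) return old + 1;  // exchange succeeded
  //       old = observed;                       // lost a race; retry
  //     }
  //   }
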
  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded. Returns true if
  // the comparison succeeded and the exchange occurred. This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
  template<typename T, typename D>
  inline static bool replace_if_null(T* value, D* volatile* dest,
                                     atomic_memory_order order = memory_order_conservative);

private:
WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

protected:
  // Dispatch handler for store. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename D, typename PlatformOp, typename Enable = void>
  struct StoreImpl;

  // Platform-specific implementation of store. Support for sizes
  // of 1, 2, 4, and (if different) pointer size bytes is required.
  // The class is a function object that must be default constructible,
  // with these requirements:
  //
  // either:
  // - dest is of type D*, an integral, enum or pointer type.
  // - new_value is of type T, an integral, enum or pointer type D or
  //   pointer type convertible to D.
  // or:
  // - T and D are the same and are primitive convertible using
  //   PrimitiveConversions
  // and either way:
  // - platform_store is an object of type PlatformStore<sizeof(T)>.
  //
  // Then
  //   platform_store(new_value, dest)
  // must be a valid expression.
  //
  // The default implementation is a volatile store. If a platform
  // requires more, e.g. for 64-bit stores, a specialization is required.
  template<size_t byte_size> struct PlatformStore;

  // Dispatch handler for load. Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename PlatformOp, typename Enable = void>
  struct LoadImpl;

  // Platform-specific implementation of load. Support for sizes of
  // 1, 2, 4 bytes and (if different) pointer size bytes is required.
  // The class is a function object that must be default
  // constructible, with these requirements:
  //
  // - dest is of type T*, an integral, enum or pointer type, or
  //   T is convertible to a primitive type using PrimitiveConversions
  // - platform_load is an object of type PlatformLoad<sizeof(T)>.
  //
  // Then
  //   platform_load(src)
  // must be a valid expression, returning a result convertible to T.
  //
  // The default implementation is a volatile load. If a platform
  // requires more, e.g. for 64-bit loads, a specialization is required.
  template<size_t byte_size> struct PlatformLoad;

  // Give platforms a variation point to specialize.
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;

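  // For illustration, a platform file might fully specialize an ordered
  // variant where a single instruction or intrinsic beats the generic
  // ScopedFence-based default defined later in this file. A sketch,
  // assuming a GCC-style toolchain (not code from any actual port):
  //
  //   template<size_t byte_size>
  //   struct Atomic::PlatformOrderedLoad<byte_size, X_ACQUIRE> {
  //     template <typename T>
  //     T operator()(const volatile T* p) const {
  //       T data;
  //       __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE);
  //       return data;
  //     }
  //   };
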
private:
  // Dispatch handler for add. Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename I, typename D, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add. Support for sizes of 4
  // bytes and (if different) pointer size bytes is required. The
  // class is a function object that must be default constructible,
  // with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then
  //   platform_add(add_value, dest)
  // must be a valid expression, returning a result convertible to D.
  //
  // No definition is provided; all platforms must explicitly define
  // this class and any needed specializations.
  template<size_t byte_size> struct PlatformAdd;

  // Helper base classes for defining PlatformAdd. To use, define
  // PlatformAdd or a specialization that derives from one of these,
  // and include in the PlatformAdd definition the support function
  // (described below) required by the base class.
  //
  // These classes implement the required function object protocol for
  // PlatformAdd, using a support function template provided by the
  // derived class. Let add_value (of type I) and dest (of type D) be
  // the arguments the object is called with. If D is a pointer type
  // P*, then let addend (of type I) be add_value * sizeof(P);
  // otherwise, addend is add_value.
  //
  // FetchAndAdd requires the derived class to provide
  //   fetch_and_add(addend, dest, order)
  // atomically adding addend to the value of dest, and returning the
  // old value.
  //
  // AddAndFetch requires the derived class to provide
  //   add_and_fetch(addend, dest, order)
  // atomically adding addend to the value of dest, and returning the
  // new value.
  //
  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
  // treat it as if it were a uintptr_t; they do not perform any
  // scaling of the addend, as that has already been done by the
  // caller.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  template<typename Derived> struct FetchAndAdd;
  template<typename Derived> struct AddAndFetch;
private:

  // Support for platforms that implement some variants of add using a
  // (typically out of line) non-template helper function. The
  // generic arguments passed to PlatformAdd need to be translated to
  // the appropriate type for the helper function, the helper function
  // invoked on the translated arguments, and the result translated
  // back. Type is the parameter / return type of the helper
  // function. No scaling of add_value is performed when D is a pointer
  // type, so this function can be used to implement the support function
  // required by AddAndFetch.
  template<typename Type, typename Fn, typename I, typename D>
  static D add_using_helper(Fn fn, I add_value, D volatile* dest);

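  // For illustration, a platform might define PlatformAdd by deriving from
  // the FetchAndAdd helper above and supplying the required support
  // function. A sketch, assuming a GCC-style __sync builtin (not code from
  // any actual port):
  //
  //   template<size_t byte_size>
  //   struct Atomic::PlatformAdd
  //     : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
  //   {
  //     template<typename I, typename D>
  //     D fetch_and_add(I add_value, D volatile* dest,
  //                     atomic_memory_order order) const {
  //       return __sync_fetch_and_add(dest, add_value);
  //     }
  //   };
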
  // Dispatch handler for cmpxchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename T, typename D, typename U, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg. Support for sizes
  // of 1, 4, and 8 is required. The class is a function object that
  // must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type atomic_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, T, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T exchange_value,
                                T volatile* dest,
                                T compare_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
  // this class.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
private:

  // Dispatch handler for xchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformXchg.
  template<typename T, typename D, typename Enable = void>
  struct XchgImpl;

  // Platform-specific implementation of xchg. Support for sizes
  // of 4 and sizeof(intptr_t) is required. The class is a function
  // object that must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value is of type T.
  // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
  //
  // Then
  //   platform_xchg(exchange_value, dest)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, atomic_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformXchg;

  // Support for platforms that implement some variants of xchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformXchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T exchange_value,
                             T volatile* dest);
};

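// For illustration, a platform file typically provides PlatformCmpxchg by
// defining the member template of an explicit specialization. A sketch,
// assuming a GCC-style __sync builtin (not code from any actual port):
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
//                                                   T volatile* dest,
//                                                   T compare_value,
//                                                   atomic_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
//   }
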
template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

// Handle load for pointer, integral and enum types.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value || IsPointer<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    // Forward to the platform handler for the size of T.
    return PlatformOp()(dest);
  }
};

// Handle load for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest));
    return Translator::recover(result);
  }
};

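// For illustration, a value-wrapper class can opt in to these translator
// based specializations by specializing PrimitiveConversions::Translate,
// mapping itself to an integral type of the same size. A hypothetical
// example (Ticket is not a real VM type):
//
//   class Ticket {
//     uint32_t _id;
//   public:
//     explicit Ticket(uint32_t id) : _id(id) {}
//     uint32_t id() const { return _id; }
//   };
//
//   template<>
//   struct PrimitiveConversions::Translate<Ticket> : public TrueType {
//     typedef Ticket Value;
//     typedef uint32_t Decayed;
//     static Decayed decay(Value x) { return x.id(); }
//     static Value recover(Decayed x) { return Ticket(x); }
//   };
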
// Default implementation of atomic load if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// load types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformLoad for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformLoad {
  template<typename T>
  T operator()(T const volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    return *dest;
  }
};

// Handle store for integral and enum types.
//
// All the involved types must be identical.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  void operator()(T new_value, T volatile* dest) const {
    // Forward to the platform handler for the size of T.
    PlatformOp()(new_value, dest);
  }
};

// Handle store for pointer types.
//
// The new_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// new_value in the destination.
template<typename T, typename D, typename PlatformOp>
struct Atomic::StoreImpl<
  T*, D*,
  PlatformOp,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  void operator()(T* new_value, D* volatile* dest) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* value = new_value;
    PlatformOp()(value, dest);
  }
};

// Handle store for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  void operator()(T new_value, T volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    PlatformOp()(Translator::decay(new_value),
                 reinterpret_cast<Decayed volatile*>(dest));
  }
};

// Default implementation of atomic store if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// storing types that are pointer sized or smaller. If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore {
  template<typename T>
  void operator()(T new_value,
                  T volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    (void)const_cast<T&>(*dest = new_value);
  }
};

// Define FetchAndAdd and AddAndFetch helper classes before including
// platform file, which may use these as base classes, requiring they
// be complete.

template<typename Derived>
struct Atomic::FetchAndAdd {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
};

template<typename Derived>
struct Atomic::AddAndFetch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const;
};

template<typename D>
inline void Atomic::inc(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(I(1), dest, order);
}

template<typename D>
inline void Atomic::dec(D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146)
  Atomic::add(I(-1), dest, order);
}

template<typename I, typename D>
inline D Atomic::sub(I sub_value, D volatile* dest, atomic_memory_order order) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  STATIC_ASSERT(IsIntegral<I>::value);
  // If D is a pointer type, use [u]intptr_t as the addend type,
  // matching signedness of I. Otherwise, use D as the addend type.
  typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
  typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
  // Only allow conversions that can't change the value.
  STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
  STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
  AddendType addend = sub_value;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146) // In case AddendType is not signed.
  return Atomic::add(-addend, dest, order);
}

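// For illustration, inc/dec/add scale pointer destinations by the size of
// the pointed-to type, so for a hypothetical cursor
//
//   HeapWord* volatile _top;
//
// the following are equivalent:
//
//   Atomic::inc(&_top);                // advances _top by sizeof(HeapWord)
//   Atomic::add((ptrdiff_t)1, &_top);  // same effect
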
// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               atomic_memory_order order) const;
};

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformXchg {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               atomic_memory_order order) const;
};

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
 public:
  void prefix() {}
  void postfix() {}
};

// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.

template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }

template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
  void *const _field;
 public:
  ScopedFence(void *const field) : _field(field) { prefix(); }
  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};

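// For illustration, given the specializations above, a call such as
// release_store_fence(p, v) performed through the generic
// PlatformOrderedStore (defined below) behaves as:
//
//   OrderAccess::release();   // ScopedFence<RELEASE_X_FENCE> prefix
//   Atomic::store(v, p);      // the underlying store
//   OrderAccess::fence();     // ScopedFence<RELEASE_X_FENCE> postfix
//
// A platform may instead specialize PlatformOrderedStore to emit a single
// combined instruction sequence.
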
// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename T>
inline T Atomic::load(const volatile T* dest) {
  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedLoad {
  template <typename T>
  T operator()(const volatile T* p) const {
    ScopedFence<type> f((void*)p);
    return Atomic::load(p);
  }
};

template <typename T>
inline T Atomic::load_acquire(const volatile T* p) {
  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
}

template<typename T, typename D>
inline void Atomic::store(T store_value, volatile D* dest) {
  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
}

template<size_t byte_size, ScopedFenceType type>
struct Atomic::PlatformOrderedStore {
  template <typename T>
  void operator()(T v, volatile T* p) const {
    ScopedFence<type> f((void*)p);
    Atomic::store(v, p);
  }
};

template <typename T, typename D>
inline void Atomic::release_store(volatile D* p, T v) {
  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
}

template <typename T, typename D>
inline void Atomic::release_store_fence(volatile D* p, T v) {
  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
}

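// For illustration, release_store pairs with load_acquire to publish an
// object safely between threads (names hypothetical):
//
//   Foo* volatile _published = NULL;
//
//   // publisher thread
//   Foo* f = new Foo();
//   Atomic::release_store(&_published, f);   // writes to *f are visible ...
//
//   // consumer thread
//   Foo* g = Atomic::load_acquire(&_published);
//   if (g != NULL) {
//     // ... to any reader that observes g == f via this acquire load
//   }
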
template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest,
                     atomic_memory_order order) {
  return AddImpl<I, D>()(add_value, dest, order);
}

template<typename I, typename D>
struct Atomic::AddImpl<
  I, D,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  D operator()(I add_value, D volatile* dest, atomic_memory_order order) const {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest, order);
  }
};

template<typename I, typename P>
struct Atomic::AddImpl<
  I, P*,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  P* operator()(I add_value, P* volatile* dest, atomic_memory_order order) const {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
    typedef typename Conditional<IsSigned<I>::value,
                                 intptr_t,
                                 uintptr_t>::type CI;
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
  }
};

template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
                                                  atomic_memory_order order) const {
  I addend = add_value;
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    addend *= sizeof(typename RemovePointer<D>::type);
  }
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
  return old + add_value;
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
                                                  atomic_memory_order order) const {
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    add_value *= sizeof(typename RemovePointer<D>::type);
  }
  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
}

template<typename Type, typename Fn, typename I, typename D>
inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         atomic_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    atomic_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(value, dest, expected_null, order);
}

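// For illustration, lock-free lazy initialization with replace_if_null
// (names hypothetical):
//
//   Cache* volatile _cache = NULL;
//
//   Cache* get_cache() {
//     Cache* c = Atomic::load_acquire(&_cache);
//     if (c == NULL) {
//       c = new Cache();
//       if (!Atomic::replace_if_null(c, &_cache)) {
//         delete c;                          // lost the race
//         c = Atomic::load_acquire(&_cache); // use the winner's instance
//       }
//     }
//     return c;
//   }
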
// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
                                        dest,
                                        compare_value,
                                        order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D, typename U>
struct Atomic::CmpxchgImpl<
  T*, D*, U*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
{
  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
                atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
  }
};

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(Translator::decay(exchange_value),
              reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T exchange_value,
                                      T volatile* dest,
                                      T compare_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 atomic_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = canon_compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    uint32_t new_value = cur;
    // ... except for the one byte we want to update
    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (cur_as_bytes[offset] == canon_compare_value);

  return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
}

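// For illustration, a platform without a byte-sized compare-and-swap
// instruction opts in to the int-based emulation above with a one-line
// specialization in its platform file:
//
//   template<>
//   struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
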
// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformXchg<sizeof(T)>()(exchange_value, dest, order);
  }
};

// Handle xchg for pointer types.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D>
struct Atomic::XchgImpl<
  T*, D*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  D* operator()(T* exchange_value, D* volatile* dest, atomic_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    return PlatformXchg<sizeof(D*)>()(new_value, dest, order);
  }
};

// Handle xchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, atomic_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      xchg(Translator::decay(exchange_value),
           reinterpret_cast<Decayed volatile*>(dest),
           order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
                                   T exchange_value,
                                   T volatile* dest) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename T, typename D>
inline D Atomic::xchg(T exchange_value, volatile D* dest, atomic_memory_order order) {
  return XchgImpl<T, D>()(exchange_value, dest, order);
}

#endif // SHARE_RUNTIME_ATOMIC_HPP