/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "metaprogramming/removeCV.hpp"
#include "metaprogramming/removePointer.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed = 0,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  // Use a value which doesn't interfere with the C++11 memory_order
  // enumerators.  We need to be more conservative.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
 public:
  // Atomic operations on int64 types are not available on all 32-bit
  // platforms.  If atomic ops on int64 are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function.  See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation.  Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86.  We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.
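
  // As a purely illustrative sketch of typical use (the variable and class
  // names here are hypothetical, not part of this interface):
  //
  //   static volatile int _counter = 0;
  //   static Foo* volatile _cache = NULL;
  //
  //   int now  = Atomic::add(2, &_counter);          // returns the updated value
  //   Atomic::inc(&_counter);                        // no return value
  //   int seen = Atomic::cmpxchg(0, &_counter, now); // returns the prior value
  //   if (Atomic::replace_if_null(new Foo(), &_cache)) {
  //     // this thread's Foo was installed; otherwise another thread won
  //   }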

  // Atomically store to a location
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static void store(T store_value, volatile D* dest);

  // Atomically load from a location
  // The type T must be either a pointer type, an integral/enum type,
  // or a type that is primitive convertible using PrimitiveConversions.
  template<typename T>
  inline static T load(const volatile T* dest);

  // Atomically add to a location.  Returns updated value.  add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>

  template<typename I, typename D>
  inline static D add(I add_value, D volatile* dest,
                      cmpxchg_memory_order order = memory_order_acq_rel);

  template<typename I, typename D>
  inline static D sub(I sub_value, D volatile* dest);

  // Atomically increment a location.  inc() provides:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type.  If it is a pointer type, then the increment is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void inc(D volatile* dest);

  // Atomically decrement a location.  dec() provides:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // The type D may be either a pointer type, or an integral
  // type.  If it is a pointer type, then the decrement is
  // scaled to the size of the type pointed to by the pointer.
  template<typename D>
  inline static void dec(D volatile* dest);

  // Performs atomic exchange of *dest with exchange_value.  Returns
  // the prior value of *dest.  xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  // The type T must be either a pointer type convertible to or equal
  // to D, an integral/enum type equal to D, or a type equal to D that
  // is primitive convertible using PrimitiveConversions.
  template<typename T, typename D>
  inline static D xchg(T exchange_value, volatile D* dest);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded.  Returns prior
  // value of *dest.  cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename T, typename D, typename U>
  inline static D cmpxchg(T exchange_value,
                          D volatile* dest,
                          U compare_value,
                          cmpxchg_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded.  Returns true if
  // the comparison succeeded and the exchange occurred.  This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
  template<typename T, typename D>
  inline static bool replace_if_null(T* value, D* volatile* dest,
                                     cmpxchg_memory_order order = memory_order_conservative);

 private:
WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointerConvertible is declared private
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

 protected:
  // Dispatch handler for store.  Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename D, typename PlatformOp, typename Enable = void>
  struct StoreImpl;

  // Platform-specific implementation of store.  Support for sizes
  // of 1, 2, 4, and (if different) pointer size bytes are required.
  // The class is a function object that must be default constructible,
  // with these requirements:
  //
  // either:
  // - dest is of type D*, an integral, enum or pointer type.
  // - new_value is of type T, an integral, enum or pointer type D or
  //   a pointer type convertible to D.
  // or:
  // - T and D are the same and are primitive convertible using PrimitiveConversions
  // and either way:
  // - platform_store is an object of type PlatformStore<sizeof(T)>.
  //
  // Then
  //   platform_store(new_value, dest)
  // must be a valid expression.
  //
  // The default implementation is a volatile store.  If a platform
  // requires more for e.g. 64 bit stores, a specialization is required.
  template<size_t byte_size> struct PlatformStore;
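
  // For example, a 32-bit port that still wants 64-bit atomic stores could
  // specialize this in its os_cpu atomic header, roughly like the sketch
  // below (the helper name is hypothetical; a real port would use an
  // appropriate instruction sequence or runtime stub):
  //
  //   template<>
  //   struct Atomic::PlatformStore<8> {
  //     template<typename T>
  //     void operator()(T store_value, T volatile* dest) const {
  //       STATIC_ASSERT(8 == sizeof(T));
  //       platform_specific_store_64(dest, store_value);  // hypothetical helper
  //     }
  //   };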

  // Dispatch handler for load.  Provides type-based validity
  // checking and limited conversions around calls to the platform-
  // specific implementation layer provided by PlatformOp.
  template<typename T, typename PlatformOp, typename Enable = void>
  struct LoadImpl;

  // Platform-specific implementation of load.  Support for sizes of
  // 1, 2, 4 bytes and (if different) pointer size bytes are required.
  // The class is a function object that must be default
  // constructible, with these requirements:
  //
  // - dest is of type T*, an integral, enum or pointer type, or
  //   T is convertible to a primitive type using PrimitiveConversions
  // - platform_load is an object of type PlatformLoad<sizeof(T)>.
  //
  // Then
  //   platform_load(src)
  // must be a valid expression, returning a result convertible to T.
  //
  // The default implementation is a volatile load.  If a platform
  // requires more for e.g. 64 bit loads, a specialization is required.
  template<size_t byte_size> struct PlatformLoad;

 private:
  // Dispatch handler for add.  Provides type-based validity checking
  // and limited conversions around calls to the platform-specific
  // implementation layer provided by PlatformAdd.
  template<typename I, typename D, typename Enable = void>
  struct AddImpl;

  // Platform-specific implementation of add.  Support for sizes of 4
  // bytes and (if different) pointer size bytes are required.  The
  // class is a function object that must be default constructible,
  // with these requirements:
  //
  // - dest is of type D*, an integral or pointer type.
  // - add_value is of type I, an integral type.
  // - sizeof(I) == sizeof(D).
  // - if D is an integral type, I == D.
  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
  //
  // Then
  //   platform_add(add_value, dest)
  // must be a valid expression, returning a result convertible to D.
  //
  // No definition is provided; all platforms must explicitly define
  // this class and any needed specializations.
  template<size_t byte_size> struct PlatformAdd;

  // Helper base classes for defining PlatformAdd.  To use, define
  // PlatformAdd or a specialization that derives from one of these,
  // and include in the PlatformAdd definition the support function
  // (described below) required by the base class.
  //
  // These classes implement the required function object protocol for
  // PlatformAdd, using a support function template provided by the
  // derived class.  Let add_value (of type I) and dest (of type D) be
  // the arguments the object is called with.  If D is a pointer type
  // P*, then let addend (of type I) be add_value * sizeof(P);
  // otherwise, addend is add_value.
  //
  // FetchAndAdd requires the derived class to provide
  //   fetch_and_add(addend, dest, order)
  // atomically adding addend to the value of dest, and returning the
  // old value.
  //
  // AddAndFetch requires the derived class to provide
  //   add_and_fetch(addend, dest, order)
  // atomically adding addend to the value of dest, and returning the
  // new value.
  //
  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
  // treat it as if it were a uintptr_t; they do not perform any
  // scaling of the addend, as that has already been done by the
  // caller.
 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  template<typename Derived> struct FetchAndAdd;
  template<typename Derived> struct AddAndFetch;
 private:
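
  // For example, a port built on a gcc-style __sync intrinsic might provide
  // PlatformAdd like this in its os_cpu atomic header (illustrative sketch;
  // the intrinsic is a full two-way barrier, so the order argument is simply
  // ignored here):
  //
  //   template<size_t byte_size>
  //   struct Atomic::PlatformAdd
  //     : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  //   {
  //     template<typename I, typename D>
  //     D add_and_fetch(I addend, D volatile* dest, cmpxchg_memory_order order) const {
  //       return __sync_add_and_fetch(dest, addend);
  //     }
  //   };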

  // Support for platforms that implement some variants of add using a
  // (typically out of line) non-template helper function.  The
  // generic arguments passed to PlatformAdd need to be translated to
  // the appropriate type for the helper function, the helper function
  // invoked on the translated arguments, and the result translated
  // back.  Type is the parameter / return type of the helper
  // function.  No scaling of add_value is performed when D is a pointer
  // type, so this function can be used to implement the support function
  // required by AddAndFetch.
  template<typename Type, typename Fn, typename I, typename D>
  static D add_using_helper(Fn fn, I add_value, D volatile* dest);

  // Dispatch handler for cmpxchg.  Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename T, typename D, typename U, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg.  Support for sizes
  // of 1, 4, and 8 is required.  The class is a function object that
  // must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type cmpxchg_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;
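
  // A port might, for example, implement the 4-byte case with a gcc-style
  // intrinsic (illustrative sketch only; the intrinsic is a full barrier,
  // so the order argument is ignored here):
  //
  //   template<>
  //   template<typename T>
  //   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
  //                                                   T volatile* dest,
  //                                                   T compare_value,
  //                                                   cmpxchg_memory_order order) const {
  //     STATIC_ASSERT(4 == sizeof(T));
  //     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  //   }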

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back.  Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T exchange_value,
                                T volatile* dest,
                                T compare_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access.  To use, derive PlatformCmpxchg<1> from
  // this class.
 public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
 private:

  // Dispatch handler for xchg.  Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformXchg.
  template<typename T, typename D, typename Enable = void>
  struct XchgImpl;

  // Platform-specific implementation of xchg.  Support for sizes
  // of 4, and sizeof(intptr_t) is required.  The class is a function
  // object that must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value is of type T.
  // - platform_xchg is an object of type PlatformXchg<sizeof(T)>.
  //
  // Then
  //   platform_xchg(exchange_value, dest)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformXchg;

  // Support for platforms that implement some variants of xchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformXchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back.  Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T xchg_using_helper(Fn fn,
                             T exchange_value,
                             T volatile* dest);
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

// Handle load for pointer, integral and enum types.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value || IsPointer<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    // Forward to the platform handler for the size of T.
    return PlatformOp()(dest);
  }
};

// Handle load for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T, typename PlatformOp>
struct Atomic::LoadImpl<
  T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T const volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    Decayed result = PlatformOp()(reinterpret_cast<Decayed const volatile*>(dest));
    return Translator::recover(result);
  }
};

// Default implementation of atomic load if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// load types that are pointer sized or smaller.  If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformLoad for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformLoad {
  template<typename T>
  T operator()(T const volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    return *dest;
  }
};

// Handle store for integral and enum types.
//
// All the involved types must be identical.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  void operator()(T new_value, T volatile* dest) const {
    // Forward to the platform handler for the size of T.
    PlatformOp()(new_value, dest);
  }
};

// Handle store for pointer types.
//
// The new_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// new_value in the destination.
template<typename T, typename D, typename PlatformOp>
struct Atomic::StoreImpl<
  T*, D*,
  PlatformOp,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  void operator()(T* new_value, D* volatile* dest) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* value = new_value;
    PlatformOp()(value, dest);
  }
};

// Handle store for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments.
template<typename T, typename PlatformOp>
struct Atomic::StoreImpl<
  T, T,
  PlatformOp,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  void operator()(T new_value, T volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    PlatformOp()(Translator::decay(new_value),
                 reinterpret_cast<Decayed volatile*>(dest));
  }
};
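
// The translator protocol used above is the one provided by
// PrimitiveConversions::Translate<T> (see metaprogramming/primitiveConversions.hpp).
// As used here, a translatable type supplies a true 'value', a same-sized
// 'Decayed' type, and decay()/recover() conversions.  A purely illustrative
// specialization for a hypothetical handle type might look like:
//
//   struct MyHandle { void* _raw; };
//
//   template<>
//   struct PrimitiveConversions::Translate<MyHandle> {
//     static const bool value = true;
//     typedef void* Decayed;
//     static Decayed decay(MyHandle x)   { return x._raw; }
//     static MyHandle recover(Decayed x) { MyHandle h; h._raw = x; return h; }
//   };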

// Default implementation of atomic store if a specific platform
// does not provide a specialization for a certain size class.
// For increased safety, the default implementation only allows
// storing types that are pointer sized or smaller.  If a platform still
// supports wide atomics, then it has to use specialization
// of Atomic::PlatformStore for that wider size class.
template<size_t byte_size>
struct Atomic::PlatformStore {
  template<typename T>
  void operator()(T new_value,
                  T volatile* dest) const {
    STATIC_ASSERT(sizeof(T) <= sizeof(void*)); // wide atomics need specialization
    (void)const_cast<T&>(*dest = new_value);
  }
};

// Define FetchAndAdd and AddAndFetch helper classes before including
// platform file, which may use these as base classes, requiring they
// be complete.

template<typename Derived>
struct Atomic::FetchAndAdd {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
};

template<typename Derived>
struct Atomic::AddAndFetch {
  template<typename I, typename D>
  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const;
};

template<typename D>
inline void Atomic::inc(D volatile* dest) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  Atomic::add(I(1), dest);
}

template<typename D>
inline void Atomic::dec(D volatile* dest) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  typedef typename Conditional<IsPointer<D>::value, ptrdiff_t, D>::type I;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146)
  Atomic::add(I(-1), dest);
}

template<typename I, typename D>
inline D Atomic::sub(I sub_value, D volatile* dest) {
  STATIC_ASSERT(IsPointer<D>::value || IsIntegral<D>::value);
  STATIC_ASSERT(IsIntegral<I>::value);
  // If D is a pointer type, use [u]intptr_t as the addend type,
  // matching signedness of I.  Otherwise, use D as the addend type.
  typedef typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type PI;
  typedef typename Conditional<IsPointer<D>::value, PI, D>::type AddendType;
  // Only allow conversions that can't change the value.
  STATIC_ASSERT(IsSigned<I>::value == IsSigned<AddendType>::value);
  STATIC_ASSERT(sizeof(I) <= sizeof(AddendType));
  AddendType addend = sub_value;
  // Assumes two's complement integer representation.
  #pragma warning(suppress: 4146) // In case AddendType is not signed.
  return Atomic::add(-addend, dest);
}

// Define the class before including platform file, which may specialize
// the operator definition.  No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class.  The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete.  The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};
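
// A platform without a byte-wide compare-and-swap can wire the 1-byte case
// to the int-based emulation above, roughly like this in its os_cpu atomic
// header (illustrative sketch only):
//
//   template<>
//   struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};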

// Define the class before including platform file, which may specialize
// the operator definition.  No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class.  The platform file is responsible for
// providing those.
template<size_t byte_size>
struct Atomic::PlatformXchg {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest) const;
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

template<typename T>
inline T Atomic::load(const volatile T* dest) {
  return LoadImpl<T, PlatformLoad<sizeof(T)> >()(dest);
}

template<typename T, typename D>
inline void Atomic::store(T store_value, volatile D* dest) {
  StoreImpl<T, D, PlatformStore<sizeof(D)> >()(store_value, dest);
}

template<typename I, typename D>
inline D Atomic::add(I add_value, D volatile* dest,
                     cmpxchg_memory_order order) {
  return AddImpl<I, D>()(add_value, dest, order);
}

template<typename I, typename D>
struct Atomic::AddImpl<
  I, D,
  typename EnableIf<IsIntegral<I>::value &&
                    IsIntegral<D>::value &&
                    (sizeof(I) <= sizeof(D)) &&
                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
{
  D operator()(I add_value, D volatile* dest, cmpxchg_memory_order order) const {
    D addend = add_value;
    return PlatformAdd<sizeof(D)>()(addend, dest, order);
  }
};

template<typename I, typename P>
struct Atomic::AddImpl<
  I, P*,
  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
{
  P* operator()(I add_value, P* volatile* dest, cmpxchg_memory_order order) const {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
    typedef typename Conditional<IsSigned<I>::value,
                                 intptr_t,
                                 uintptr_t>::type CI;
    CI addend = add_value;
    return PlatformAdd<sizeof(P*)>()(addend, dest, order);
  }
};

// Most platforms do not support atomic add on a 2-byte value. However,
// if the value occupies the most significant 16 bits of an aligned 32-bit
// word, then we can do this with an atomic add of (add_value << 16)
// to the 32-bit word.
//
// The least significant parts of this 32-bit word will never be affected, even
// in case of overflow/underflow.
//
// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
template<>
struct Atomic::AddImpl<short, short> {
  short operator()(short add_value, short volatile* dest, cmpxchg_memory_order order) const {
#ifdef VM_LITTLE_ENDIAN
    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1), order);
#else
    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest), order);
#endif
    return (short)(new_value >> 16); // preserves sign
  }
};
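
// Worked example of the 16-bit trick above (little-endian, illustrative
// values): with the short at byte offset 2 of its aligned 32-bit word and a
// current value of 5, adding 3 performs a 32-bit atomic add of (3 << 16);
// only the upper halfword changes, and (new_value >> 16) yields the updated
// short, 8.  The addend's low 16 bits are zero and carries only propagate
// upward (out of bit 31 they are dropped), so the neighbouring halfword is
// never disturbed.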

template<typename Derived>
template<typename I, typename D>
inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest,
                                                  cmpxchg_memory_order order) const {
  I addend = add_value;
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    addend *= sizeof(typename RemovePointer<D>::type);
  }
  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest, order);
  return old + add_value;
}

template<typename Derived>
template<typename I, typename D>
inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest,
                                                  cmpxchg_memory_order order) const {
  // If D is a pointer type P*, scale by sizeof(P).
  if (IsPointer<D>::value) {
    add_value *= sizeof(typename RemovePointer<D>::type);
  }
  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest, order);
}

template<typename Type, typename Fn, typename I, typename D>
inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
  return PrimitiveConversions::cast<D>(
    fn(PrimitiveConversions::cast<Type>(add_value),
       reinterpret_cast<Type volatile*>(dest)));
}
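
// For example, a port whose 4-byte atomic add is an out-of-line stub that
// returns the updated value could implement the AddAndFetch support function
// via add_using_helper; the helper name below is hypothetical:
//
//   // int32_t platform_atomic_add(int32_t add_value, int32_t volatile* dest);
//
//   template<>
//   struct Atomic::PlatformAdd<4>
//     : Atomic::AddAndFetch<Atomic::PlatformAdd<4> >
//   {
//     template<typename I, typename D>
//     D add_and_fetch(I addend, D volatile* dest, cmpxchg_memory_order order) const {
//       return add_using_helper<int32_t>(platform_atomic_add, addend, dest);
//     }
//   };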

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         cmpxchg_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    cmpxchg_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(value, dest, expected_null, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
                                        dest,
                                        compare_value,
                                        order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D, typename U>
struct Atomic::CmpxchgImpl<
  T*, D*, U*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
{
  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
                cmpxchg_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
  }
};

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(Translator::decay(exchange_value),
              reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T exchange_value,
                                      T volatile* dest,
                                      T compare_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       PrimitiveConversions::cast<Type>(compare_value)));
}

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = canon_compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    uint32_t new_value = cur;
    // ... except for the one byte we want to update
    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte still holds the compare value, loop and try again
  } while (cur_as_bytes[offset] == canon_compare_value);

  return PrimitiveConversions::cast<T>(cur_as_bytes[offset]);
}

// Handle xchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest) const {
    // Forward to the platform handler for the size of T.
    return PlatformXchg<sizeof(T)>()(exchange_value, dest);
  }
};

// Handle xchg for pointer types.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D>
struct Atomic::XchgImpl<
  T*, D*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value>::type>
{
  D* operator()(T* exchange_value, D* volatile* dest) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    return PlatformXchg<sizeof(D*)>()(new_value, dest);
  }
};

// Handle xchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::XchgImpl<
  T, T,
  typename EnableIf<PrimitiveConversions::Translate<T>::value>::type>
{
  T operator()(T exchange_value, T volatile* dest) const {
    typedef PrimitiveConversions::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      xchg(Translator::decay(exchange_value),
           reinterpret_cast<Decayed volatile*>(dest)));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::xchg_using_helper(Fn fn,
                                   T exchange_value,
                                   T volatile* dest) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return PrimitiveConversions::cast<T>(
    fn(PrimitiveConversions::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest)));
}

template<typename T, typename D>
inline D Atomic::xchg(T exchange_value, volatile D* dest) {
  return XchgImpl<T, D>()(exchange_value, dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP