< prev index next >

src/hotspot/share/oops/accessBackend.hpp

Print this page




  85   template <DecoratorSet decorators>
  86   inline typename HeapOopType<decorators>::type*
  87   oop_field_addr(oop base, ptrdiff_t byte_offset) {
  88     return reinterpret_cast<typename HeapOopType<decorators>::type*>(
  89              reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  90   }
  91 
// This metafunction returns whether it is possible for a type T to require
// locking to support wide atomics or not.
template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
// Native 8-byte compare-and-exchange is available: no access needs a lock.
struct PossiblyLockedAccess: public IntegralConstant<bool, false> {};
#else
// No native 8-byte CAS: accesses wider than 4 bytes may need the lock fallback.
struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)> {};
#endif
 100 
// Function-pointer signatures for the runtime-dispatched barrier functions,
// per value type T. The *_at variants address a field via (base, offset);
// the others take a resolved raw address.
template <DecoratorSet decorators, typename T>
struct AccessFunctionTypes {
  typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
  typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
  typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
  typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);

  typedef T (*load_func_t)(void* addr);
  typedef void (*store_func_t)(void* addr, T value);
  typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
  typedef T (*atomic_xchg_func_t)(T new_value, void* addr);

  typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                   arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                   size_t length);
  typedef void (*clone_func_t)(oop src, oop dst, size_t size);
  typedef oop (*resolve_func_t)(oop obj);
};

// Type-erased (void element) accesses only support arraycopy.
template <DecoratorSet decorators>
struct AccessFunctionTypes<decorators, void> {
  typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                   arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                   size_t length);
};

// Primary template; specialized per BarrierType (see ACCESS_GENERATE_ACCESS_FUNCTION below).
template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
 128 
 129 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
 130   template <DecoratorSet decorators, typename T>                    \
 131   struct AccessFunction<decorators, T, bt>: AllStatic{              \


 276     HasDecorator<ds, MO_RELAXED>::value>::type
 277   store_internal(void* addr, T value);
 278 
  // MO_VOLATILE store: write through a volatile-qualified lvalue so the
  // compiler may not elide or reorder the access.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value>::type
  store_internal(void* addr, T value) {
    // The const_cast strips volatile from the assignment's result and the
    // (void) discards it — presumably to avoid a volatile re-read of the
    // stored value; exact statement form is deliberate.
    (void)const_cast<T&>(*reinterpret_cast<volatile T*>(addr) = value);
  }

  // MO_UNORDERED store: plain assignment, no ordering constraints.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }
 292 
  // Atomic primitives selected by memory-order decorator; defined out of line
  // (platform-specific implementations not visible in this file section).
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  // Note: atomic_xchg only has a MO_SEQ_CST flavour here.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(T new_value, void* addr);
 307 
  // The following *_locked mechanisms serve the purpose of handling atomic operations
  // that are larger than a machine can handle, and then possibly opt for using
  // a slower path using a mutex to perform the operation.

  // T narrow enough for native atomics: forward directly to the internal op.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  }

  // Wide T: out-of-line definition may take the mutex slow path.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr) {
    return atomic_xchg_internal<ds>(new_value, addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr);
 335 
public:
  // Public raw access API. Memory ordering comes from the class's decorator
  // set; the *_internal / *_maybe_locked helpers above do the real work.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(new_value, addr, compare_value);
  }

  template <typename T>
  static inline T atomic_xchg(T new_value, void* addr) {
    return atomic_xchg_maybe_locked<decorators>(new_value, addr);
  }

  template <typename T>
  static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop_* variants: defined out of line; handle oop values (compressed-oop
  // encode/decode presumably happens there — not visible in this section).
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

  template <typename T>
  static T oop_atomic_xchg(T new_value, void* addr);
  template <typename T>
  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);

  // *_at variants: resolve the field address, then reuse the address-based op.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
  }

  template <typename T>
  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    return atomic_xchg(new_value, field_addr(base, offset));
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);

  // Raw access performs no to-space/handshake resolution: identity.
  static oop resolve(oop obj) { return obj; }
};
 411 
 412 // Below is the implementation of the first 4 steps of the template pipeline:
 413 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
 414 //           and sets default decorators to sensible values.
 415 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
 416 //           multiple types. The P type of the address and T type of the value must
 417 //           match.
 418 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
 419 //           avoided, and in that case avoids it (calling raw accesses or


 498     }
 499   };
 500 
  // RuntimeDispatch specializations: each holds a static function pointer for
  // one barrier kind and forwards calls through it. The pointer presumably
  // starts at the corresponding *_init resolver and is replaced with the
  // resolved barrier function — the resolution itself is not visible here.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      return _load_at_func(base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value);

    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      return _atomic_cmpxchg_func(new_value, addr, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value);

    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(T new_value, void* addr);

    static inline T atomic_xchg(T new_value, void* addr) {
      return _atomic_xchg_func(new_value, addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);

    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      return _atomic_xchg_at_func(new_value, base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    static func_t _arraycopy_func;

    static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length);

    static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                 size_t length) {
      return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                             dst_obj, dst_offset_in_bytes, dst_raw,
                             length);
    }
  };


 765       HasDecorator<decorators, AS_RAW>::value, T>::type
 766     load_at(oop base, ptrdiff_t offset) {
 767       return load<decorators, T>(field_addr(base, offset));
 768     }
 769 
 770     template <DecoratorSet decorators, typename T>
 771     inline static typename EnableIf<
 772       !HasDecorator<decorators, AS_RAW>::value, T>::type
 773     load_at(oop base, ptrdiff_t offset) {
 774       if (is_hardwired_primitive<decorators>()) {
 775         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 776         return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
 777       } else {
 778         return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
 779       }
 780     }
 781 
    // Raw cmpxchg, decorators fully known at compile time: pick the oop or
    // primitive flavour of the raw backend directly.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
      } else {
        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
      }
    }

    // Raw cmpxchg where the oop representation depends on the runtime
    // UseCompressedOops flag: fix the compressed-oop decorator accordingly
    // and re-enter dispatch (the other overload then hardwires it).
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      }
    }
 806 
 807     template <DecoratorSet decorators, typename T>
 808     inline static typename EnableIf<
 809       !HasDecorator<decorators, AS_RAW>::value, T>::type
 810     atomic_cmpxchg(T new_value, void* addr, T compare_value) {
 811       if (is_hardwired_primitive<decorators>()) {
 812         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 813         return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
 814       } else {
 815         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
 816       }
 817     }
 818 
    // Raw cmpxchg-at: resolve the field address up front and delegate to the
    // address-based overload.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
    }
 825 
 826     template <DecoratorSet decorators, typename T>
 827     inline static typename EnableIf<
 828       !HasDecorator<decorators, AS_RAW>::value, T>::type
 829     atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
 830       if (is_hardwired_primitive<decorators>()) {
 831         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 832         return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
 833       } else {
 834         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
 835       }
 836     }
 837 
    // Raw xchg, decorators fully known at compile time: pick the oop or
    // primitive flavour of the raw backend directly.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(T new_value, void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_xchg(new_value, addr);
      } else {
        return Raw::atomic_xchg(new_value, addr);
      }
    }

    // Raw xchg where the oop representation depends on the runtime
    // UseCompressedOops flag: fix the compressed-oop decorator and re-enter.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(T new_value, void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
      }
    }
 862 
 863     template <DecoratorSet decorators, typename T>
 864     inline static typename EnableIf<
 865       !HasDecorator<decorators, AS_RAW>::value, T>::type
 866     atomic_xchg(T new_value, void* addr) {
 867       if (is_hardwired_primitive<decorators>()) {
 868         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 869         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
 870       } else {
 871         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
 872       }
 873     }
 874 
    // Raw xchg-at: resolve the field address up front and delegate to the
    // address-based overload.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
    }
 881 
 882     template <DecoratorSet decorators, typename T>
 883     inline static typename EnableIf<
 884       !HasDecorator<decorators, AS_RAW>::value, T>::type
 885     atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
 886       if (is_hardwired_primitive<decorators>()) {
 887         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 888         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, base, offset);
 889       } else {
 890         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
 891       }
 892     }
 893 
    // Raw arraycopy with hardwired decorators: pick the oop or primitive
    // flavour of the raw backend directly.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
    arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
              size_t length) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                  dst_obj, dst_offset_in_bytes, dst_raw,
                                  length);
      } else {
        return Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                              dst_obj, dst_offset_in_bytes, dst_raw,
                              length);
      }
    }


1001   inline void store_reduce_types(narrowOop* addr, oop value) {
1002     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1003                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1004     PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1005   }
1006 
  // narrowOop destination with narrowOop value: both the compressed-oop
  // conversion and the compressed runtime representation are known here.
  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  // HeapWord* destination: only the conversion decorator is added here;
  // INTERNAL_RT_USE_COMPRESSED_OOPS is not hard-wired by this overload.
  template <DecoratorSet decorators>
  inline void store_reduce_types(HeapWord* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }
1019 
  // Step 2 (type reduction) for cmpxchg: matching T*/T types pass straight
  // through; oop/narrowOop/HeapWord combinations add the internal
  // compressed-oop decorators before re-entering pre-runtime dispatch.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  // HeapWord* destination: representation not hard-wired; only the
  // conversion decorator is added.
  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(oop new_value,
                                         HeapWord* addr,
                                         oop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }
1046 
  // Step 2 (type reduction) for xchg; same scheme as the cmpxchg overloads above.
  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
    const DecoratorSet expanded_decorators = decorators;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  // HeapWord* destination: representation not hard-wired here.
  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }
1072 
  // Step 2 (type reduction) for load; narrowOop sources return either oop or
  // narrowOop depending on the requested T (OopOrNarrowOop metafunction).
  template <DecoratorSet decorators, typename T>
  inline T load_reduce_types(T* addr) {
    return PreRuntimeDispatch::load<decorators, T>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
  }

  // HeapWord* source: loaded as oop; representation not hard-wired here.
  template <DecoratorSet decorators, typename T>
  inline oop load_reduce_types(HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
  }
1090 


1174       (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
1175       (MO_VOLATILE | decorators) : decorators>::value;
1176     return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
1177   }
1178 
  // Step 1 entry point for load_at: verify the decorator/type combination,
  // pick the internal value type, expand default decorators, and dispatch.
  template <DecoratorSet decorators, typename T>
  inline T load_at(oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    // Oop values are loaded as oop/narrowOop internally; other types decay.
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // Expand the decorators (figure out sensible defaults)
    // Potentially remember if we need compressed oop awareness
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
  }
1192 
  // Step 1 entry point for address-based cmpxchg. Defaults the memory order
  // to MO_SEQ_CST when the caller supplied no ordering decorator.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                            const_cast<DecayedP*>(addr),
                                                            compare_decayed_value);
  }
1207 
  // Step 1 entry point for field-based cmpxchg.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Determine default memory ordering
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    // Potentially remember that we need compressed oop awareness
    const DecoratorSet final_decorators = expanded_decorators |
                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                           INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
                                                                   offset, compare_decayed_value);
  }
1225 
  // Step 1 entry point for address-based xchg.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_xchg(T new_value, P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                         const_cast<DecayedP*>(addr));
  }
1237 
  // Step 1 entry point for field-based xchg; also records compressed-oop
  // awareness for oop values.
  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
  }
1249 
  // Step 1 entry point for arraycopy. Element type must be an oop value, an
  // integral/floating primitive, or void (type-erased copy).
  template <DecoratorSet decorators, typename T>
  inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length) {
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
    typedef typename Decay<T>::type DecayedT;
    // Array copies are always in-heap array accesses.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
    return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                       dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                       length);
  }
1263 
1264   template <DecoratorSet decorators>
1265   inline void clone(oop src, oop dst, size_t size) {
1266     const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1267     PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);




// Compute the address of an oop field as base + byte_offset, typed as the
// heap oop representation (oop or narrowOop) selected by the decorators.
template <DecoratorSet decorators>
inline typename HeapOopType<decorators>::type*
oop_field_addr(oop base, ptrdiff_t byte_offset) {
  return reinterpret_cast<typename HeapOopType<decorators>::type*>(
           reinterpret_cast<intptr_t>((void*)base) + byte_offset);
}
  91 
// This metafunction returns whether it is possible for a type T to require
// locking to support wide atomics or not.
template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
// Native 8-byte compare-and-exchange is available: no access needs a lock.
struct PossiblyLockedAccess: public IntegralConstant<bool, false> {};
#else
// No native 8-byte CAS: accesses wider than 4 bytes may need the lock fallback.
struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)> {};
#endif
 100 
     // Function-pointer typedefs for every access primitive, parameterized on
     // the value type T. These are the signatures that runtime-dispatched
     // barrier functions resolve to (see RuntimeDispatch below).
 101   template <DecoratorSet decorators, typename T>
 102   struct AccessFunctionTypes {
 103     typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
 104     typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
 105     typedef T (*atomic_cmpxchg_at_func_t)(oop base, ptrdiff_t offset, T compare_value, T new_value);
 106     typedef T (*atomic_xchg_at_func_t)(oop base, ptrdiff_t offset, T new_value);
 107 
 108     typedef T (*load_func_t)(void* addr);
 109     typedef void (*store_func_t)(void* addr, T value);
 110     typedef T (*atomic_cmpxchg_func_t)(void* addr, T compare_value, T new_value);
 111     typedef T (*atomic_xchg_func_t)(void* addr, T new_value);
 112 
 113     typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 114                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 115                                      size_t length);
 116     typedef void (*clone_func_t)(oop src, oop dst, size_t size);
 117     typedef oop (*resolve_func_t)(oop obj);
 118   };
 119 
     // Specialization for type-erased elements: with T = void only the
     // arraycopy signature exists (src/dst passed as void*).
 120   template <DecoratorSet decorators>
 121   struct AccessFunctionTypes<decorators, void> {
 122     typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
 123                                      arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
 124                                      size_t length);
 125   };
 126 
     // Maps (decorators, T, barrier kind) to the matching function-pointer
     // type from AccessFunctionTypes; specializations are generated by the
     // ACCESS_GENERATE_ACCESS_FUNCTION macro below.
 127   template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};
 128 
 129 #define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
 130   template <DecoratorSet decorators, typename T>                    \
 131   struct AccessFunction<decorators, T, bt>: AllStatic{              \


 276     HasDecorator<ds, MO_RELAXED>::value>::type
 277   store_internal(void* addr, T value);
 278 
     // MO_VOLATILE variant: performs the store through a volatile-qualified
     // lvalue. The (void)const_cast discards the result of the volatile
     // assignment expression without re-reading the volatile location.
 279   template <DecoratorSet ds, typename T>
 280   static inline typename EnableIf<
 281     HasDecorator<ds, MO_VOLATILE>::value>::type
 282   store_internal(void* addr, T value) {
 283     (void)const_cast<T&>(*reinterpret_cast<volatile T*>(addr) = value);
 284   }
 285 
     // MO_UNORDERED variant: plain store with no ordering constraints.
 286   template <DecoratorSet ds, typename T>
 287   static inline typename EnableIf<
 288     HasDecorator<ds, MO_UNORDERED>::value>::type
 289   store_internal(void* addr, T value) {
 290     *reinterpret_cast<T*>(addr) = value;
 291   }
 292 
     // Memory-ordering-specific cmpxchg/xchg primitives, selected by the MO_*
     // decorator; declared here and defined out of line.
 293   template <DecoratorSet ds, typename T>
 294   static typename EnableIf<
 295     HasDecorator<ds, MO_SEQ_CST>::value, T>::type
 296   atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);
 297 
 298   template <DecoratorSet ds, typename T>
 299   static typename EnableIf<
 300     HasDecorator<ds, MO_RELAXED>::value, T>::type
 301   atomic_cmpxchg_internal(void* addr, T compare_value, T new_value);
 302 
     // Note: atomic_xchg_internal has only a MO_SEQ_CST flavour.
 303   template <DecoratorSet ds, typename T>
 304   static typename EnableIf<
 305     HasDecorator<ds, MO_SEQ_CST>::value, T>::type
 306   atomic_xchg_internal(void* addr, T new_value);
 307 
 308   // The following *_locked mechanisms serve the purpose of handling atomic operations
 309   // that are larger than a machine can handle, and then possibly opt for using
 310   // a slower path using a mutex to perform the operation.
 311 
     // Fast path: T never needs locking (see PossiblyLockedAccess); forward
     // straight to the ordering-specific primitive.
 312   template <DecoratorSet ds, typename T>
 313   static inline typename EnableIf<
 314     !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
 315   atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value) {
 316     return atomic_cmpxchg_internal<ds>(addr, compare_value, new_value);
 317   }
 318 
     // Slow path for possibly-locked (wide) T; defined out of line.
 319   template <DecoratorSet ds, typename T>
 320   static typename EnableIf<
 321     AccessInternal::PossiblyLockedAccess<T>::value, T>::type
 322   atomic_cmpxchg_maybe_locked(void* addr, T compare_value, T new_value);
 323 
     // Same fast/slow split for xchg.
 324   template <DecoratorSet ds, typename T>
 325   static inline typename EnableIf<
 326     !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
 327   atomic_xchg_maybe_locked(void* addr, T new_value) {
 328     return atomic_xchg_internal<ds>(addr, new_value);
 329   }
 330 
 331   template <DecoratorSet ds, typename T>
 332   static typename EnableIf<
 333     AccessInternal::PossiblyLockedAccess<T>::value, T>::type
 334   atomic_xchg_maybe_locked(void* addr, T new_value);
 335 
 336 public:
     // Public raw entry points: each forwards to the ordering-specific
     // (and possibly lock-based) internal helpers above, selected by the
     // class's decorators.
 337   template <typename T>
 338   static inline void store(void* addr, T value) {
 339     store_internal<decorators>(addr, value);
 340   }
 341 
 342   template <typename T>
 343   static inline T load(void* addr) {
 344     return load_internal<decorators, T>(addr);
 345   }
 346 
 347   template <typename T>
 348   static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 349     return atomic_cmpxchg_maybe_locked<decorators>(addr, compare_value, new_value);
 350   }
 351 
 352   template <typename T>
 353   static inline T atomic_xchg(void* addr, T new_value) {
 354     return atomic_xchg_maybe_locked<decorators>(addr, new_value);
 355   }
 356 
     // Bulk copy and oop-aware variants (oop values may need compressed-oop
     // encoding/decoding); declared here, defined out of line.
 357   template <typename T>
 358   static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 359                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 360                         size_t length);
 361 
 362   template <typename T>
 363   static void oop_store(void* addr, T value);
 364   template <typename T>
 365   static void oop_store_at(oop base, ptrdiff_t offset, T value);
 366 
 367   template <typename T>
 368   static T oop_load(void* addr);
 369   template <typename T>
 370   static T oop_load_at(oop base, ptrdiff_t offset);
 371 
 372   template <typename T>
 373   static T oop_atomic_cmpxchg(void* addr, T compare_value, T new_value);
 374   template <typename T>
 375   static T oop_atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value);
 376 
 377   template <typename T>
 378   static T oop_atomic_xchg(void* addr, T new_value);
 379   template <typename T>
 380   static T oop_atomic_xchg_at(oop base, ptrdiff_t offset, T new_value);
     // *_at convenience forms: resolve the field address from (base, offset)
     // via field_addr and forward to the address-based primitive.
 382   template <typename T>
 383   static void store_at(oop base, ptrdiff_t offset, T value) {
 384     store(field_addr(base, offset), value);
 385   }
 386 
 387   template <typename T>
 388   static T load_at(oop base, ptrdiff_t offset) {
 389     return load<T>(field_addr(base, offset));
 390   }
 391 
 392   template <typename T>
 393   static T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
 394     return atomic_cmpxchg(field_addr(base, offset), compare_value, new_value);
 395   }
 396 
 397   template <typename T>
 398   static T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 399     return atomic_xchg(field_addr(base, offset), new_value);
 400   }
 401 
 402   template <typename T>
 403   static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 404                             arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 405                             size_t length);
 406 
 407   static void clone(oop src, oop dst, size_t size);
 408 
     // Raw access applies no load barrier: the oop is returned unchanged.
 409   static oop resolve(oop obj) { return obj; }
 410 };
 411 
 412 // Below is the implementation of the first 4 steps of the template pipeline:
 413 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
 414 //           and sets default decorators to sensible values.
 415 // * Step 2: Reduce types. This step makes sure there is only a single T type and not
 416 //           multiple types. The P type of the address and T type of the value must
 417 //           match.
 418 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
 419 //           avoided, and in that case avoids it (calling raw accesses or


 498     }
 499   };
 500 
     // Runtime dispatch for BARRIER_LOAD_AT: calls go through the
     // _load_at_func pointer. NOTE(review): presumably load_at_init (defined
     // elsewhere) resolves the concrete barrier and installs it — confirm in
     // the .inline.hpp counterpart.
 501   template <DecoratorSet decorators, typename T>
 502   struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
 503     typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
 504     static func_t _load_at_func;
 505 
 506     static T load_at_init(oop base, ptrdiff_t offset);
 507 
 508     static inline T load_at(oop base, ptrdiff_t offset) {
 509       return _load_at_func(base, offset);
 510     }
 511   };
 512 
     // Runtime dispatch for BARRIER_ATOMIC_CMPXCHG (address-based cmpxchg);
     // calls go through the _atomic_cmpxchg_func pointer.
 513   template <DecoratorSet decorators, typename T>
 514   struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
 515     typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
 516     static func_t _atomic_cmpxchg_func;
 517 
 518     static T atomic_cmpxchg_init(void* addr, T compare_value, T new_value);
 519 
 520     static inline T atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 521       return _atomic_cmpxchg_func(addr, compare_value, new_value);
 522     }
 523   };
 524 
     // Runtime dispatch for BARRIER_ATOMIC_CMPXCHG_AT ((base, offset)-based
     // cmpxchg); calls go through the _atomic_cmpxchg_at_func pointer.
 525   template <DecoratorSet decorators, typename T>
 526   struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
 527     typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
 528     static func_t _atomic_cmpxchg_at_func;
 529 
 530     static T atomic_cmpxchg_at_init(oop base, ptrdiff_t offset, T compare_value, T new_value);
 531 
 532     static inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
 533       return _atomic_cmpxchg_at_func(base, offset, compare_value, new_value);
 534     }
 535   };
 536 
     // Runtime dispatch for BARRIER_ATOMIC_XCHG (address-based xchg); calls
     // go through the _atomic_xchg_func pointer.
 537   template <DecoratorSet decorators, typename T>
 538   struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
 539     typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
 540     static func_t _atomic_xchg_func;
 541 
 542     static T atomic_xchg_init(void* addr, T new_value);
 543 
 544     static inline T atomic_xchg(void* addr, T new_value) {
 545       return _atomic_xchg_func(addr, new_value);
 546     }
 547   };
 548 
     // Runtime dispatch for BARRIER_ATOMIC_XCHG_AT ((base, offset)-based
     // xchg); calls go through the _atomic_xchg_at_func pointer.
 549   template <DecoratorSet decorators, typename T>
 550   struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
 551     typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
 552     static func_t _atomic_xchg_at_func;
 553 
 554     static T atomic_xchg_at_init(oop base, ptrdiff_t offset, T new_value);
 555 
 556     static inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 557       return _atomic_xchg_at_func(base, offset, new_value);
 558     }
 559   };
 560 
     // Runtime dispatch for BARRIER_ARRAYCOPY; calls go through the
     // _arraycopy_func pointer. Returns whether the copy succeeded.
 561   template <DecoratorSet decorators, typename T>
 562   struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
 563     typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
 564     static func_t _arraycopy_func;
 565 
 566     static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 567                                arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 568                                size_t length);
 569 
 570     static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 571                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 572                                  size_t length) {
 573       return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
 574                              dst_obj, dst_offset_in_bytes, dst_raw,
 575                              length);
 576     }
 577   };


 765       HasDecorator<decorators, AS_RAW>::value, T>::type
 766     load_at(oop base, ptrdiff_t offset) {
 767       return load<decorators, T>(field_addr(base, offset));
 768     }
 769 
       // Non-raw load_at: hardwired primitives can skip runtime dispatch by
       // adding AS_RAW; everything else goes through RuntimeDispatch.
 770     template <DecoratorSet decorators, typename T>
 771     inline static typename EnableIf<
 772       !HasDecorator<decorators, AS_RAW>::value, T>::type
 773     load_at(oop base, ptrdiff_t offset) {
 774       if (is_hardwired_primitive<decorators>()) {
 775         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 776         return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
 777       } else {
 778         return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
 779       }
 780     }
 781 
       // Raw cmpxchg when raw-ness is statically known: pick the oop or
       // primitive RawAccessBarrier flavour directly.
 782     template <DecoratorSet decorators, typename T>
 783     inline static typename EnableIf<
 784       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
 785     atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 786       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 787       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 788         return Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
 789       } else {
 790         return Raw::atomic_cmpxchg(addr, compare_value, new_value);
 791       }
 792     }
 793 
       // Raw cmpxchg when the compressed-oop representation is only known at
       // runtime: bake the UseCompressedOops decision into the decorators and
       // re-dispatch, so each branch can be hardwired.
 794     template <DecoratorSet decorators, typename T>
 795     inline static typename EnableIf<
 796       HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
 797     atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 798       if (UseCompressedOops) {
 799         const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
 800         return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
 801       } else {
 802         const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
 803         return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
 804       }
 805     }
 806 
       // Non-raw cmpxchg: hardwired primitives short-circuit to AS_RAW;
       // otherwise fall back to runtime dispatch.
 807     template <DecoratorSet decorators, typename T>
 808     inline static typename EnableIf<
 809       !HasDecorator<decorators, AS_RAW>::value, T>::type
 810     atomic_cmpxchg(void* addr, T compare_value, T new_value) {
 811       if (is_hardwired_primitive<decorators>()) {
 812         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 813         return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
 814       } else {
 815         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(addr, compare_value, new_value);
 816       }
 817     }
 818 
       // Raw cmpxchg_at: compute the field address, then defer to the
       // address-based raw cmpxchg.
 819     template <DecoratorSet decorators, typename T>
 820     inline static typename EnableIf<
 821       HasDecorator<decorators, AS_RAW>::value, T>::type
 822     atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
 823       return atomic_cmpxchg<decorators>(field_addr(base, offset), compare_value, new_value);
 824     }
 825 
       // Non-raw cmpxchg_at: hardwired primitives short-circuit to the AS_RAW
       // overload; everything else goes through runtime dispatch.
 826     template <DecoratorSet decorators, typename T>
 827     inline static typename EnableIf<
 828       !HasDecorator<decorators, AS_RAW>::value, T>::type
 829     atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
 830       if (is_hardwired_primitive<decorators>()) {
 831         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 832         return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(base, offset, compare_value, new_value);
 833       } else {
 834         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(base, offset, compare_value, new_value);
 835       }
 836     }
 837 
       // Raw xchg when raw-ness is statically known: pick the oop or
       // primitive RawAccessBarrier flavour directly.
 838     template <DecoratorSet decorators, typename T>
 839     inline static typename EnableIf<
 840       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
 841     atomic_xchg(void* addr, T new_value) {
 842       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 843       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 844         return Raw::oop_atomic_xchg(addr, new_value);
 845       } else {
 846         return Raw::atomic_xchg(addr, new_value);
 847       }
 848     }
 849 
       // Raw xchg when the compressed-oop representation is only known at
       // runtime: bake the UseCompressedOops decision into the decorators and
       // re-dispatch.
 850     template <DecoratorSet decorators, typename T>
 851     inline static typename EnableIf<
 852       HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
 853     atomic_xchg(void* addr, T new_value) {
 854       if (UseCompressedOops) {
 855         const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
 856         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
 857       } else {
 858         const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
 859         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
 860       }
 861     }
 862 
       // Non-raw xchg: hardwired primitives short-circuit to AS_RAW;
       // otherwise fall back to runtime dispatch.
 863     template <DecoratorSet decorators, typename T>
 864     inline static typename EnableIf<
 865       !HasDecorator<decorators, AS_RAW>::value, T>::type
 866     atomic_xchg(void* addr, T new_value) {
 867       if (is_hardwired_primitive<decorators>()) {
 868         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 869         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
 870       } else {
 871         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(addr, new_value);
 872       }
 873     }
 874 
       // Raw xchg_at: compute the field address, then defer to the
       // address-based raw xchg.
 875     template <DecoratorSet decorators, typename T>
 876     inline static typename EnableIf<
 877       HasDecorator<decorators, AS_RAW>::value, T>::type
 878     atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 879       return atomic_xchg<decorators>(field_addr(base, offset), new_value);
 880     }
 881 
 882     template <DecoratorSet decorators, typename T>
 883     inline static typename EnableIf<
 884       !HasDecorator<decorators, AS_RAW>::value, T>::type
 885     atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
 886       if (is_hardwired_primitive<decorators>()) {
 887         const DecoratorSet expanded_decorators = decorators | AS_RAW;
 888         return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(base, offset, new_value);
 889       } else {
 890         return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(base, offset, new_value);
 891       }
 892     }
 893 
       // Raw arraycopy when raw-ness is statically known: pick the oop or
       // primitive RawAccessBarrier flavour; returns whether the copy
       // succeeded.
 894     template <DecoratorSet decorators, typename T>
 895     inline static typename EnableIf<
 896       HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
 897     arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
 898               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
 899               size_t length) {
 900       typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
 901       if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
 902         return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
 903                                   dst_obj, dst_offset_in_bytes, dst_raw,
 904                                   length);
 905       } else {
 906         return Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
 907                               dst_obj, dst_offset_in_bytes, dst_raw,
 908                               length);
 909       }
 910     }


1001   inline void store_reduce_types(narrowOop* addr, oop value) {
1002     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1003                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1004     PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1005   }
1006 
     // Type reduction for stores: a narrowOop* destination implies the
     // compressed runtime representation; a HeapWord* destination adds only
     // the conversion decorator (the runtime UseCompressedOops dispatch
     // decides the actual layout).
1007   template <DecoratorSet decorators>
1008   inline void store_reduce_types(narrowOop* addr, narrowOop value) {
1009     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1010                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1011     PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1012   }
1013 
1014   template <DecoratorSet decorators>
1015   inline void store_reduce_types(HeapWord* addr, oop value) {
1016     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1017     PreRuntimeDispatch::store<expanded_decorators>(addr, value);
1018   }
1019 
     // Type reduction for cmpxchg: the generic overload forwards unchanged;
     // narrowOop* destinations imply the compressed runtime representation,
     // while HeapWord* leaves the layout to the runtime dispatch.
1020   template <DecoratorSet decorators, typename T>
1021   inline T atomic_cmpxchg_reduce_types(T* addr, T compare_value, T new_value) {
1022     return PreRuntimeDispatch::atomic_cmpxchg<decorators>(addr, compare_value, new_value);
1023   }
1024 
1025   template <DecoratorSet decorators>
1026   inline oop atomic_cmpxchg_reduce_types(narrowOop* addr, oop compare_value, oop new_value) {
1027     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1028                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1029     return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
1030   }
1031 
1032   template <DecoratorSet decorators>
1033   inline narrowOop atomic_cmpxchg_reduce_types(narrowOop* addr, narrowOop compare_value, narrowOop new_value) {
1034     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1035                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1036     return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
1037   }
1038 
1039   template <DecoratorSet decorators>
1040   inline oop atomic_cmpxchg_reduce_types(HeapWord* addr,
1041                                          oop compare_value,
1042                                          oop new_value) {
1043     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1044     return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(addr, compare_value, new_value);
1045   }
1046 
     // Type reduction for xchg, mirroring the cmpxchg overload set above.
1047   template <DecoratorSet decorators, typename T>
1048   inline T atomic_xchg_reduce_types(T* addr, T new_value) {
1049     const DecoratorSet expanded_decorators = decorators;
1050     return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1051   }
1052 
1053   template <DecoratorSet decorators>
1054   inline oop atomic_xchg_reduce_types(narrowOop* addr, oop new_value) {
1055     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1056                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1057     return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1058   }
1059 
1060   template <DecoratorSet decorators>
1061   inline narrowOop atomic_xchg_reduce_types(narrowOop* addr, narrowOop new_value) {
1062     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1063                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1064     return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1065   }
1066 
1067   template <DecoratorSet decorators>
1068   inline oop atomic_xchg_reduce_types(HeapWord* addr, oop new_value) {
1069     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1070     return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(addr, new_value);
1071   }
1072 
     // Type reduction for loads: narrowOop* sources return the
     // OopOrNarrowOop-selected type with the compressed representation baked
     // in; HeapWord* sources return oop with the layout decided at runtime.
1073   template <DecoratorSet decorators, typename T>
1074   inline T load_reduce_types(T* addr) {
1075     return PreRuntimeDispatch::load<decorators, T>(addr);
1076   }
1077 
1078   template <DecoratorSet decorators, typename T>
1079   inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
1080     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
1081                                              INTERNAL_RT_USE_COMPRESSED_OOPS;
1082     return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
1083   }
1084 
1085   template <DecoratorSet decorators, typename T>
1086   inline oop load_reduce_types(HeapWord* addr) {
1087     const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
1088     return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
1089   }
1090 


1174       (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
1175       (MO_VOLATILE | decorators) : decorators>::value;
1176     return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
1177   }
1178 
     // API entry point (step 1): verify the types, decay T (oop values go
     // through OopOrNarrowOop), fix up default decorators, and remember that
     // oop values may need compressed-oop conversion before dispatching.
1179   template <DecoratorSet decorators, typename T>
1180   inline T load_at(oop base, ptrdiff_t offset) {
1181     verify_types<decorators, T>();
1182     typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
1183                                  typename OopOrNarrowOop<T>::type,
1184                                  typename Decay<T>::type>::type DecayedT;
1185     // Expand the decorators (figure out sensible defaults)
1186     // Potentially remember if we need compressed oop awareness
1187     const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
1188                                              (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1189                                               INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
1190     return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
1191   }
1192 
     // API entry point: decay the pointer and value types, default the
     // memory ordering to MO_SEQ_CST when no MO_* decorator was given, then
     // reduce the pointer type and dispatch.
1193   template <DecoratorSet decorators, typename P, typename T>
1194   inline T atomic_cmpxchg(P* addr, T compare_value, T new_value) {
1195     verify_types<decorators, T>();
1196     typedef typename Decay<P>::type DecayedP;
1197     typedef typename Decay<T>::type DecayedT;
1198     DecayedT new_decayed_value = new_value;
1199     DecayedT compare_decayed_value = compare_value;
1200     const DecoratorSet expanded_decorators = DecoratorFixup<
1201       (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
1202       (MO_SEQ_CST | decorators) : decorators>::value;
1203     return atomic_cmpxchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1204                                                             compare_decayed_value,
1205                                                             new_decayed_value);
1206   }
1207 
     // API entry point: like atomic_cmpxchg, but addressed by (base, offset);
     // additionally records that oop values may need compressed-oop
     // conversion before dispatching.
1208   template <DecoratorSet decorators, typename T>
1209   inline T atomic_cmpxchg_at(oop base, ptrdiff_t offset, T compare_value, T new_value) {
1210     verify_types<decorators, T>();
1211     typedef typename Decay<T>::type DecayedT;
1212     DecayedT new_decayed_value = new_value;
1213     DecayedT compare_decayed_value = compare_value;
1214     // Determine default memory ordering
1215     const DecoratorSet expanded_decorators = DecoratorFixup<
1216       (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
1217       (MO_SEQ_CST | decorators) : decorators>::value;
1218     // Potentially remember that we need compressed oop awareness
1219     const DecoratorSet final_decorators = expanded_decorators |
1220                                           (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1221                                            INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
1222     return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(base, offset, compare_decayed_value,
1223                                                                    new_decayed_value);
1224   }
1225 
     // API entry point: decay the pointer and value types, force MO_SEQ_CST
     // (the only xchg flavour), reduce the pointer type and dispatch.
1226   template <DecoratorSet decorators, typename P, typename T>
1227   inline T atomic_xchg(P* addr, T new_value) {
1228     verify_types<decorators, T>();
1229     typedef typename Decay<P>::type DecayedP;
1230     typedef typename Decay<T>::type DecayedT;
1231     DecayedT new_decayed_value = new_value;
1232     // atomic_xchg is only available in SEQ_CST flavour.
1233     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
1234     return atomic_xchg_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr),
1235                                                          new_decayed_value);
1236   }
1237 
     // API entry point: like atomic_xchg, but addressed by (base, offset);
     // additionally records that oop values may need compressed-oop
     // conversion before dispatching.
1238   template <DecoratorSet decorators, typename T>
1239   inline T atomic_xchg_at(oop base, ptrdiff_t offset, T new_value) {
1240     verify_types<decorators, T>();
1241     typedef typename Decay<T>::type DecayedT;
1242     DecayedT new_decayed_value = new_value;
1243     // atomic_xchg is only available in SEQ_CST flavour.
1244     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
1245                                              (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
1246                                               INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
1247     return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(base, offset, new_decayed_value);
1248   }
1249 
     // API entry point: statically restricts element types to oops,
     // integrals, floating point, or type-erased void; marks the access as an
     // in-heap array access and dispatches. Returns whether the copy
     // succeeded.
1250   template <DecoratorSet decorators, typename T>
1251   inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
1252                         arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
1253                         size_t length) {
1254     STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
1255                    (IsSame<T, void>::value || IsIntegral<T>::value) ||
1256                     IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
1257     typedef typename Decay<T>::type DecayedT;
1258     const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
1259     return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
1260                                                        dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
1261                                                        length);
1262   }
1263 
1264   template <DecoratorSet decorators>
1265   inline void clone(oop src, oop dst, size_t size) {
1266     const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
1267     PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);


< prev index next >