1 /* 2 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP 26 #define SHARE_OOPS_ACCESSBACKEND_HPP 27 28 #include "gc/shared/barrierSetConfig.hpp" 29 #include "memory/allocation.hpp" 30 #include "metaprogramming/conditional.hpp" 31 #include "metaprogramming/decay.hpp" 32 #include "metaprogramming/enableIf.hpp" 33 #include "metaprogramming/integralConstant.hpp" 34 #include "metaprogramming/isFloatingPoint.hpp" 35 #include "metaprogramming/isIntegral.hpp" 36 #include "metaprogramming/isPointer.hpp" 37 #include "metaprogramming/isSame.hpp" 38 #include "metaprogramming/isVolatile.hpp" 39 #include "oops/accessDecorators.hpp" 40 #include "oops/oopsHierarchy.hpp" 41 #include "runtime/globals.hpp" 42 #include "utilities/debug.hpp" 43 #include "utilities/globalDefinitions.hpp" 44 45 46 // This metafunction returns either oop or narrowOop depending on whether 47 // an access needs to use compressed oops or not. 
// Selects the in-heap representation of an oop for the given decorators:
// narrowOop when both conversion and runtime use of compressed oops are
// requested, otherwise plain oop.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  typedef typename Conditional<needs_oop_compress, narrowOop, oop>::type type;
};

namespace AccessInternal {
  // The kinds of access primitives that can be dispatched through the barrier
  // layer. The *_AT variants address a field at (base oop + byte offset); the
  // others operate on a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_RESOLVE
  };

  // True iff the access passes oop values (INTERNAL_VALUE_IS_OOP) typed as oop
  // while the heap representation for these decorators is narrowOop; such
  // accesses must encode/decode between the two representations.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public IntegralConstant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    IsSame<typename HeapOopType<decorators>::type, narrowOop>::value &&
    IsSame<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    typedef typename Conditional<
      HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
      typename HeapOopType<decorators>::type, T>::type type;
  };

  // Computes the address of an oop field at (base + byte_offset), typed as
  // oop* or narrowOop* according to the decorators.
  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // This metafunction returns whether it is possible for a type T to require
  // locking to support wide atomics or not.
  template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
  struct PossiblyLockedAccess: public IntegralConstant<bool, false> {};
#else
  struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)> {};
#endif

  // Function pointer signatures for each barrier kind, parameterized by the
  // decorators and the value type T.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
    typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
    typedef T (*atomic_xchg_func_t)(T new_value, void* addr);

    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef oop (*resolve_func_t)(oop obj);
  };

  // Specialization for T = void: only arraycopy is expressible with untyped
  // element pointers.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef bool (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                     size_t length);
  };

  // Maps (decorators, T, barrier kind) to the matching function pointer type
  // from AccessFunctionTypes; one specialization is generated per BarrierType.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Resolvers (defined elsewhere) that select the concrete accessor function
  // for a given (decorators, T, barrier kind) combination.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // Scoped lock (constructor acquires, destructor releases — see the cpp file)
  // used for wide atomic operations that need locking; see
  // wide_atomic_needs_locking() and the *_maybe_locked paths below.
  class AccessLocker {
  public:
    AccessLocker();
    ~AccessLocker();
  };
  bool wide_atomic_needs_locking();

  // Raw (untyped) field address computation; out-of-line declaration, an
  // inline definition follows later in this file.
  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
}

// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;

// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw"
// strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  // Compressing overload: selected when an oop value must be stored as
  // narrowOop; defined out-of-line.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // Identity overload: no conversion needed, pass the value through.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  // Public-facing encode: picks the right encode_internal overload for this
  // class' decorators and returns the in-heap representation of value.
  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  // Decompressing overload: narrowOop in, oop out; defined out-of-line.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  // Identity overload: no conversion needed, pass the value through.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // load_internal overloads, one per memory-ordering decorator, selected with
  // EnableIf. The MO_SEQ_CST / MO_ACQUIRE / MO_RELAXED variants are only
  // declared here and defined out-of-line; MO_VOLATILE and MO_UNORDERED are
  // simple enough to define inline.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  // MO_VOLATILE: load through a volatile-qualified pointer.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const volatile T*>(addr);
  }

  // MO_UNORDERED: plain load with no ordering constraints.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<T*>(addr);
  }

  // store_internal overloads, mirroring load_internal: MO_SEQ_CST /
  // MO_RELEASE / MO_RELAXED are defined out-of-line, the rest inline.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  // MO_VOLATILE: store through a volatile-qualified pointer; the cast
  // discards the result of the assignment expression.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value>::type
  store_internal(void* addr, T value) {
    (void)const_cast<T&>(*reinterpret_cast<volatile T*>(addr) = value);
  }

  // MO_UNORDERED: plain store with no ordering constraints.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  // Atomic compare-and-exchange; MO_SEQ_CST and MO_RELAXED variants are
  // defined out-of-line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  // Atomic exchange; only a MO_SEQ_CST variant exists, defined out-of-line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(T new_value, void* addr);

  // The following *_locked mechanisms serve the purpose of handling atomic operations
  // that are larger than a machine can handle, and then possibly opt for using
  // a slower path using a mutex to perform the operation.

  // Fast path: T never needs locking on this platform — forward directly.
  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  }

  // Slow path: T may require locking; defined out-of-line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr) {
    return atomic_xchg_internal<ds>(new_value, addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr);

public:
  // Public primitive access API: forwards to the overload selected by this
  // class' memory-ordering decorators.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(new_value, addr, compare_value);
  }

  template <typename T>
  static inline T atomic_xchg(T new_value, void* addr) {
    return atomic_xchg_maybe_locked<decorators>(new_value, addr);
  }

  template <typename T>
  static bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop_* variants: like the above, but encode/decode compressed oops when
  // the decorators require it. Defined out-of-line.
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

  template <typename T>
  static T oop_atomic_xchg(T new_value, void* addr);
  template <typename T>
  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);

  // *_at variants: compute the field address from (base, offset) and forward
  // to the corresponding raw-address accessor.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
  }

  template <typename T>
  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    return atomic_xchg(new_value, field_addr(base, offset));
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);

  // Raw accesses have no to-space invariant to maintain: resolve is identity.
  static oop resolve(oop obj) { return obj; }
};

// Below is the implementation of the first 4 steps of the template pipeline:
// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
//           and sets default decorators to sensible values.
// * Step 2: Reduce types. This step makes sure there is only a single T type and not
//           multiple types. The P type of the address and T type of the value must
//           match.
// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
//           avoided, and in that case avoids it (calling raw accesses or
//           primitive accesses in a build that does not require primitive GC barriers)
// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
//           to the access.

namespace AccessInternal {
  // Primary template: any oop-like type canonicalizes to oop ...
  template <typename T>
  struct OopOrNarrowOopInternal: AllStatic {
    typedef oop type;
  };

  // ... except narrowOop, which stays narrowOop.
  template <>
  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
    typedef narrowOop type;
  };

  // This metafunction returns a canonicalized oop/narrowOop type for a passed
  // in oop-like types passed in from oop_* overloads where the user has sworn
  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOoop, instanceOopDesc*, and random other things).
  // In the oop_* overloads, it must hold that if the passed in type T is not
  // narrowOop, then it by contract has to be one of many oop-like types implicitly
  // convertible to oop, and hence returns oop as the canonical oop type.
  // If it turns out it was not, then the implicit conversion to oop will fail
  // to compile, as desired.
  // Strips CV/reference qualifiers before canonicalizing, so e.g. const oop&
  // also maps to oop.
  template <typename T>
  struct OopOrNarrowOop: AllStatic {
    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
  };

  // Untyped field address at (base + byte_offset).
  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }
  // Step 4: Runtime dispatch
  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
  // accessor. This is required when the access either depends on whether compressed oops
  // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
  // barriers). The way it works is that a function pointer initially pointing to an
  // accessor resolution function gets called for each access. Upon first invocation, it
  // resolves which accessor to be used in future invocations and patches the
  // function pointer to this new accessor.

  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};

  // Each specialization below holds one static function pointer per
  // (decorators, T) instantiation. It starts out pointing at the *_init
  // resolver (see the definitions after these specializations), which
  // performs the access and repoints the function pointer for subsequent
  // calls.

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    static func_t _store_func;

    static void store_init(void* addr, T value);

    static inline void store(void* addr, T value) {
      _store_func(addr, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    static func_t _store_at_func;

    static void store_at_init(oop base, ptrdiff_t offset, T value);

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      _store_at_func(base, offset, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    static func_t _load_func;

    static T load_init(void* addr);

    static inline T load(void* addr) {
      return _load_func(addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      return _load_at_func(base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value);

    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      return _atomic_cmpxchg_func(new_value, addr, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value);

    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(T new_value, void* addr);

    static inline T atomic_xchg(T new_value, void* addr) {
      return _atomic_xchg_func(new_value, addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);

    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      return _atomic_xchg_at_func(new_value, base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    static func_t _arraycopy_func;

    static bool arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length);

    static inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                 size_t length) {
      return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                             dst_obj, dst_offset_in_bytes, dst_raw,
                             length);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    static func_t _clone_func;

    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      _clone_func(src, dst, size);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
    static func_t _resolve_func;

    static oop resolve_init(oop obj);

    static inline oop resolve(oop obj) {
      return _resolve_func(obj);
    }
  };

  // Initialize the function pointers to point to the resolving function.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func =
&atomic_xchg_at_init; 635 636 template <DecoratorSet decorators, typename T> 637 typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type 638 RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init; 639 640 template <DecoratorSet decorators, typename T> 641 typename AccessFunction<decorators, T, BARRIER_CLONE>::type 642 RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init; 643 644 template <DecoratorSet decorators, typename T> 645 typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type 646 RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init; 647 648 // Step 3: Pre-runtime dispatching. 649 // The PreRuntimeDispatch class is responsible for filtering the barrier strength 650 // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime 651 // dispatch point. Otherwise it goes through a runtime check if hardwiring was 652 // not possible. 653 struct PreRuntimeDispatch: AllStatic { 654 template<DecoratorSet decorators> 655 struct CanHardwireRaw: public IntegralConstant< 656 bool, 657 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access 658 !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address) 659 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address) 660 {}; 661 662 static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP; 663 664 template<DecoratorSet decorators> 665 static bool is_hardwired_primitive() { 666 return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value && 667 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value; 668 } 669 670 template <DecoratorSet decorators, typename T> 671 inline static typename EnableIf< 672 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type 673 store(void* 
addr, T value) { 674 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 675 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 676 Raw::oop_store(addr, value); 677 } else { 678 Raw::store(addr, value); 679 } 680 } 681 682 template <DecoratorSet decorators, typename T> 683 inline static typename EnableIf< 684 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type 685 store(void* addr, T value) { 686 if (UseCompressedOops) { 687 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 688 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 689 } else { 690 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 691 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 692 } 693 } 694 695 template <DecoratorSet decorators, typename T> 696 inline static typename EnableIf< 697 !HasDecorator<decorators, AS_RAW>::value>::type 698 store(void* addr, T value) { 699 if (is_hardwired_primitive<decorators>()) { 700 const DecoratorSet expanded_decorators = decorators | AS_RAW; 701 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 702 } else { 703 RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value); 704 } 705 } 706 707 template <DecoratorSet decorators, typename T> 708 inline static typename EnableIf< 709 HasDecorator<decorators, AS_RAW>::value>::type 710 store_at(oop base, ptrdiff_t offset, T value) { 711 store<decorators>(field_addr(base, offset), value); 712 } 713 714 template <DecoratorSet decorators, typename T> 715 inline static typename EnableIf< 716 !HasDecorator<decorators, AS_RAW>::value>::type 717 store_at(oop base, ptrdiff_t offset, T value) { 718 if (is_hardwired_primitive<decorators>()) { 719 const DecoratorSet expanded_decorators = decorators | AS_RAW; 720 PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value); 721 } else { 722 RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value); 723 
} 724 } 725 726 template <DecoratorSet decorators, typename T> 727 inline static typename EnableIf< 728 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type 729 load(void* addr) { 730 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 731 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 732 return Raw::template oop_load<T>(addr); 733 } else { 734 return Raw::template load<T>(addr); 735 } 736 } 737 738 template <DecoratorSet decorators, typename T> 739 inline static typename EnableIf< 740 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type 741 load(void* addr) { 742 if (UseCompressedOops) { 743 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 744 return PreRuntimeDispatch::load<expanded_decorators, T>(addr); 745 } else { 746 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 747 return PreRuntimeDispatch::load<expanded_decorators, T>(addr); 748 } 749 } 750 751 template <DecoratorSet decorators, typename T> 752 inline static typename EnableIf< 753 !HasDecorator<decorators, AS_RAW>::value, T>::type 754 load(void* addr) { 755 if (is_hardwired_primitive<decorators>()) { 756 const DecoratorSet expanded_decorators = decorators | AS_RAW; 757 return PreRuntimeDispatch::load<expanded_decorators, T>(addr); 758 } else { 759 return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr); 760 } 761 } 762 763 template <DecoratorSet decorators, typename T> 764 inline static typename EnableIf< 765 HasDecorator<decorators, AS_RAW>::value, T>::type 766 load_at(oop base, ptrdiff_t offset) { 767 return load<decorators, T>(field_addr(base, offset)); 768 } 769 770 template <DecoratorSet decorators, typename T> 771 inline static typename EnableIf< 772 !HasDecorator<decorators, AS_RAW>::value, T>::type 773 load_at(oop base, ptrdiff_t offset) { 774 if (is_hardwired_primitive<decorators>()) { 775 const DecoratorSet expanded_decorators = 
decorators | AS_RAW; 776 return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset); 777 } else { 778 return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset); 779 } 780 } 781 782 template <DecoratorSet decorators, typename T> 783 inline static typename EnableIf< 784 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type 785 atomic_cmpxchg(T new_value, void* addr, T compare_value) { 786 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 787 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 788 return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value); 789 } else { 790 return Raw::atomic_cmpxchg(new_value, addr, compare_value); 791 } 792 } 793 794 template <DecoratorSet decorators, typename T> 795 inline static typename EnableIf< 796 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type 797 atomic_cmpxchg(T new_value, void* addr, T compare_value) { 798 if (UseCompressedOops) { 799 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 800 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 801 } else { 802 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 803 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 804 } 805 } 806 807 template <DecoratorSet decorators, typename T> 808 inline static typename EnableIf< 809 !HasDecorator<decorators, AS_RAW>::value, T>::type 810 atomic_cmpxchg(T new_value, void* addr, T compare_value) { 811 if (is_hardwired_primitive<decorators>()) { 812 const DecoratorSet expanded_decorators = decorators | AS_RAW; 813 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 814 } else { 815 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value); 816 } 817 } 818 819 template 
<DecoratorSet decorators, typename T> 820 inline static typename EnableIf< 821 HasDecorator<decorators, AS_RAW>::value, T>::type 822 atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) { 823 return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value); 824 } 825 826 template <DecoratorSet decorators, typename T> 827 inline static typename EnableIf< 828 !HasDecorator<decorators, AS_RAW>::value, T>::type 829 atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) { 830 if (is_hardwired_primitive<decorators>()) { 831 const DecoratorSet expanded_decorators = decorators | AS_RAW; 832 return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value); 833 } else { 834 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value); 835 } 836 } 837 838 template <DecoratorSet decorators, typename T> 839 inline static typename EnableIf< 840 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type 841 atomic_xchg(T new_value, void* addr) { 842 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 843 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 844 return Raw::oop_atomic_xchg(new_value, addr); 845 } else { 846 return Raw::atomic_xchg(new_value, addr); 847 } 848 } 849 850 template <DecoratorSet decorators, typename T> 851 inline static typename EnableIf< 852 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type 853 atomic_xchg(T new_value, void* addr) { 854 if (UseCompressedOops) { 855 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 856 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 857 } else { 858 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 859 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 860 
} 861 } 862 863 template <DecoratorSet decorators, typename T> 864 inline static typename EnableIf< 865 !HasDecorator<decorators, AS_RAW>::value, T>::type 866 atomic_xchg(T new_value, void* addr) { 867 if (is_hardwired_primitive<decorators>()) { 868 const DecoratorSet expanded_decorators = decorators | AS_RAW; 869 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 870 } else { 871 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr); 872 } 873 } 874 875 template <DecoratorSet decorators, typename T> 876 inline static typename EnableIf< 877 HasDecorator<decorators, AS_RAW>::value, T>::type 878 atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) { 879 return atomic_xchg<decorators>(new_value, field_addr(base, offset)); 880 } 881 882 template <DecoratorSet decorators, typename T> 883 inline static typename EnableIf< 884 !HasDecorator<decorators, AS_RAW>::value, T>::type 885 atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) { 886 if (is_hardwired_primitive<decorators>()) { 887 const DecoratorSet expanded_decorators = decorators | AS_RAW; 888 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, base, offset); 889 } else { 890 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset); 891 } 892 } 893 894 template <DecoratorSet decorators, typename T> 895 inline static typename EnableIf< 896 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type 897 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, 898 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, 899 size_t length) { 900 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 901 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 902 return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw, 903 dst_obj, dst_offset_in_bytes, dst_raw, 904 length); 905 } else { 906 return Raw::arraycopy(src_obj, 
src_offset_in_bytes, src_raw, 907 dst_obj, dst_offset_in_bytes, dst_raw, 908 length); 909 } 910 } 911 912 template <DecoratorSet decorators, typename T> 913 inline static typename EnableIf< 914 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type 915 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, 916 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, 917 size_t length) { 918 if (UseCompressedOops) { 919 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 920 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw, 921 dst_obj, dst_offset_in_bytes, dst_raw, 922 length); 923 } else { 924 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 925 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw, 926 dst_obj, dst_offset_in_bytes, dst_raw, 927 length); 928 } 929 } 930 931 template <DecoratorSet decorators, typename T> 932 inline static typename EnableIf< 933 !HasDecorator<decorators, AS_RAW>::value, bool>::type 934 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, 935 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, 936 size_t length) { 937 if (is_hardwired_primitive<decorators>()) { 938 const DecoratorSet expanded_decorators = decorators | AS_RAW; 939 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw, 940 dst_obj, dst_offset_in_bytes, dst_raw, 941 length); 942 } else { 943 return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw, 944 dst_obj, dst_offset_in_bytes, dst_raw, 945 length); 946 } 947 } 948 949 template <DecoratorSet decorators> 950 inline static typename EnableIf< 951 HasDecorator<decorators, AS_RAW>::value>::type 952 clone(oop src, oop dst, size_t size) { 953 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 954 
Raw::clone(src, dst, size); 955 } 956 957 template <DecoratorSet decorators> 958 inline static typename EnableIf< 959 !HasDecorator<decorators, AS_RAW>::value>::type 960 clone(oop src, oop dst, size_t size) { 961 RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size); 962 } 963 964 template <DecoratorSet decorators> 965 inline static typename EnableIf< 966 HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type 967 resolve(oop obj) { 968 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 969 return Raw::resolve(obj); 970 } 971 972 template <DecoratorSet decorators> 973 inline static typename EnableIf< 974 !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type 975 resolve(oop obj) { 976 return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj); 977 } 978 }; 979 980 // Step 2: Reduce types. 981 // Enforce that for non-oop types, T and P have to be strictly the same. 982 // P is the type of the address and T is the type of the values. 983 // As for oop types, it is allow to send T in {narrowOop, oop} and 984 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to 985 // the subsequent table. (columns are P, rows are T) 986 // | | HeapWord | oop | narrowOop | 987 // | oop | rt-comp | hw-none | hw-comp | 988 // | narrowOop | x | x | hw-none | 989 // 990 // x means not allowed 991 // rt-comp means it must be checked at runtime whether the oop is compressed. 992 // hw-none means it is statically known the oop will not be compressed. 993 // hw-comp means it is statically known the oop will be compressed. 
994 995 template <DecoratorSet decorators, typename T> 996 inline void store_reduce_types(T* addr, T value) { 997 PreRuntimeDispatch::store<decorators>(addr, value); 998 } 999 1000 template <DecoratorSet decorators> 1001 inline void store_reduce_types(narrowOop* addr, oop value) { 1002 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 1003 INTERNAL_RT_USE_COMPRESSED_OOPS; 1004 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 1005 } 1006 1007 template <DecoratorSet decorators> 1008 inline void store_reduce_types(narrowOop* addr, narrowOop value) { 1009 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 1010 INTERNAL_RT_USE_COMPRESSED_OOPS; 1011 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 1012 } 1013 1014 template <DecoratorSet decorators> 1015 inline void store_reduce_types(HeapWord* addr, oop value) { 1016 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP; 1017 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 1018 } 1019 1020 template <DecoratorSet decorators, typename T> 1021 inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) { 1022 return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value); 1023 } 1024 1025 template <DecoratorSet decorators> 1026 inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) { 1027 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 1028 INTERNAL_RT_USE_COMPRESSED_OOPS; 1029 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 1030 } 1031 1032 template <DecoratorSet decorators> 1033 inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) { 1034 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 1035 INTERNAL_RT_USE_COMPRESSED_OOPS; 1036 
return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 1037 } 1038 1039 template <DecoratorSet decorators> 1040 inline oop atomic_cmpxchg_reduce_types(oop new_value, 1041 HeapWord* addr, 1042 oop compare_value) { 1043 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP; 1044 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 1045 } 1046 1047 template <DecoratorSet decorators, typename T> 1048 inline T atomic_xchg_reduce_types(T new_value, T* addr) { 1049 const DecoratorSet expanded_decorators = decorators; 1050 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 1051 } 1052 1053 template <DecoratorSet decorators> 1054 inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) { 1055 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 1056 INTERNAL_RT_USE_COMPRESSED_OOPS; 1057 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 1058 } 1059 1060 template <DecoratorSet decorators> 1061 inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) { 1062 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 1063 INTERNAL_RT_USE_COMPRESSED_OOPS; 1064 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 1065 } 1066 1067 template <DecoratorSet decorators> 1068 inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) { 1069 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP; 1070 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 1071 } 1072 1073 template <DecoratorSet decorators, typename T> 1074 inline T load_reduce_types(T* addr) { 1075 return PreRuntimeDispatch::load<decorators, T>(addr); 1076 } 1077 1078 template <DecoratorSet decorators, typename T> 1079 inline typename OopOrNarrowOop<T>::type 
load_reduce_types(narrowOop* addr) { 1080 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 1081 INTERNAL_RT_USE_COMPRESSED_OOPS; 1082 return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr); 1083 } 1084 1085 template <DecoratorSet decorators, typename T> 1086 inline oop load_reduce_types(HeapWord* addr) { 1087 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP; 1088 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr); 1089 } 1090 1091 template <DecoratorSet decorators, typename T> 1092 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, 1093 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, 1094 size_t length) { 1095 return PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw, 1096 dst_obj, dst_offset_in_bytes, dst_raw, 1097 length); 1098 } 1099 1100 template <DecoratorSet decorators> 1101 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw, 1102 arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw, 1103 size_t length) { 1104 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP; 1105 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw, 1106 dst_obj, dst_offset_in_bytes, dst_raw, 1107 length); 1108 } 1109 1110 template <DecoratorSet decorators> 1111 inline bool arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw, 1112 arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw, 1113 size_t length) { 1114 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 1115 INTERNAL_RT_USE_COMPRESSED_OOPS; 1116 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw, 1117 dst_obj, dst_offset_in_bytes, dst_raw, 1118 length); 1119 } 1120 
  // Step 1: Set default decorators. This step remembers if a type was volatile
  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
  // memory ordering is set for the access, and the implied decorator rules
  // are applied to select sensible defaults for decorators that have not been
  // explicitly set. For example, default object referent strength is set to strong.
  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
  // and references from the types). This step also performs some type verification
  // that the passed in types make sense.

  // Compile-time sanity check: non-oop accesses must use a recognized
  // primitive type (pointer, integral or floating point).
  template <DecoratorSet decorators, typename T>
  static void verify_types(){
    // If this fails to compile, then you have sent in something that is
    // not recognized as a valid primitive type to a primitive Access function.
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                   (IsPointer<T>::value || IsIntegral<T>::value) ||
                    IsFloatingPoint<T>::value)); // not allowed primitive type
  }

  // Address-based store entry point: decays P/T and fixes up decorators
  // before reducing types (Step 2).
  template <DecoratorSet decorators, typename P, typename T>
  inline void store(P* addr, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_VOLATILE by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_VOLATILE | decorators) : decorators>::value;
    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
  }

  // Offset-based store entry point: oop values additionally get the
  // compressed-oop conversion decorator so later steps can pick the layout.
  template <DecoratorSet decorators, typename T>
  inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
  }

  // Address-based load entry point. For oop loads the result type is chosen
  // via OopOrNarrowOop<T>; for primitives T is merely decayed.
  template <DecoratorSet decorators, typename P, typename T>
  inline T load(P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_VOLATILE by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_VOLATILE | decorators) : decorators>::value;
    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
  }

  // Offset-based load entry point; mirrors store_at's decorator handling.
  template <DecoratorSet decorators, typename T>
  inline T load_at(oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // Expand the decorators (figure out sensible defaults)
    // Potentially remember if we need compressed oop awareness
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
  }

  // Address-based compare-and-exchange entry point; defaults the memory
  // ordering to MO_SEQ_CST when the caller specified none.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                            const_cast<DecayedP*>(addr),
                                                            compare_decayed_value);
  }

  // Offset-based compare-and-exchange entry point.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Determine default memory ordering
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    // Potentially remember that we need compressed oop awareness
    const DecoratorSet final_decorators = expanded_decorators |
                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                           INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
                                                                   offset, compare_decayed_value);
  }

  // Address-based exchange entry point.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_xchg(T new_value, P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                         const_cast<DecayedP*>(addr));
  }

  // Offset-based exchange entry point.
  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
  }

  // Arraycopy entry point: accepts either heap arrays (src_obj/dst_obj plus
  // byte offsets) or raw buffers (src_raw/dst_raw); void is allowed for
  // type-erased element copies. IS_ARRAY | IN_HEAP are implied.
  template <DecoratorSet decorators, typename T>
  inline bool arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length) {
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
    typedef typename Decay<T>::type DecayedT;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
    return arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                       dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                       length);
  }

  // Object clone entry point.
  template <DecoratorSet decorators>
  inline void clone(oop src, oop dst, size_t size) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
  }

  // Oop resolve entry point.
  template <DecoratorSet decorators>
  inline oop resolve(oop obj) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
  }

  // Infer the type that should be returned from an Access::oop_load.
  // The proxy defers the load until the use site fixes the result type
  // (oop vs narrowOop) via the conversion operators below.
  template <typename P, DecoratorSet decorators>
  class OopLoadProxy: public StackObj {
  private:
    P *const _addr;  // address the deferred load will read from
  public:
    OopLoadProxy(P* addr) : _addr(addr) {}

    inline operator oop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
    }

    inline operator narrowOop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
    }

    // Comparisons load with the comparand's type, then compare.
    template <typename T>
    inline bool operator ==(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
    }
  };

  // Infer the type that should be returned from an Access::load_at.
  // The templated conversion operator performs the load with whatever type
  // the use site requests.
  template <DecoratorSet decorators>
  class LoadAtProxy: public StackObj {
  private:
    const oop _base;          // object holding the field
    const ptrdiff_t _offset;  // byte offset of the field within _base
  public:
    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    template <typename T>
    inline operator T() const {
      return load_at<decorators, T>(_base, _offset);
    }

    template <typename T>
    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

    template <typename T>
    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
  };

  // Infer the type that should be returned from an Access::oop_load_at.
  // Like LoadAtProxy, but the value is an oop and INTERNAL_VALUE_IS_OOP is
  // added so the dispatch machinery applies oop semantics.
  template <DecoratorSet decorators>
  class OopLoadAtProxy: public StackObj {
  private:
    const oop _base;          // object holding the field
    const ptrdiff_t _offset;  // byte offset of the field within _base
  public:
    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    inline operator oop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
    }

    inline operator narrowOop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
    }

    template <typename T>
    inline bool operator ==(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
    }
  };
}

#endif // SHARE_OOPS_ACCESSBACKEND_HPP