1 /* 2 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_OOPS_ACCESSBACKEND_HPP 26 #define SHARE_OOPS_ACCESSBACKEND_HPP 27 28 #include "gc/shared/barrierSetConfig.hpp" 29 #include "memory/allocation.hpp" 30 #include "metaprogramming/conditional.hpp" 31 #include "metaprogramming/decay.hpp" 32 #include "metaprogramming/enableIf.hpp" 33 #include "metaprogramming/integralConstant.hpp" 34 #include "metaprogramming/isFloatingPoint.hpp" 35 #include "metaprogramming/isIntegral.hpp" 36 #include "metaprogramming/isPointer.hpp" 37 #include "metaprogramming/isSame.hpp" 38 #include "metaprogramming/isVolatile.hpp" 39 #include "oops/accessDecorators.hpp" 40 #include "oops/oopsHierarchy.hpp" 41 #include "runtime/globals.hpp" 42 #include "utilities/debug.hpp" 43 #include "utilities/globalDefinitions.hpp" 44 45 46 // This metafunction returns either oop or narrowOop depending on whether 47 // an access needs to use compressed oops or not. 
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  // Compressed oops are used for this access only if both the access asked for
  // conversion (INTERNAL_CONVERT_COMPRESSED_OOP) and the runtime actually runs
  // with compressed oops (INTERNAL_RT_USE_COMPRESSED_OOPS).
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  typedef typename Conditional<needs_oop_compress, narrowOop, oop>::type type;
};

// This meta-function returns either oop or narrowOop depending on whether
// a back-end needs to consider compressed oops types or not.
template <DecoratorSet decorators>
struct ValueOopType: AllStatic {
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  typedef typename Conditional<needs_oop_compress, narrowOop, oop>::type type;
};

namespace AccessInternal {
  // Identifies which access primitive a barrier implements. The *_AT variants
  // address the access as (base oop, byte offset) rather than a raw address.
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_VALUE_COPY,
    BARRIER_RESOLVE
  };

  // True iff the accessed value is an oop that must be converted to/from
  // narrowOop for this access: the value is an oop, the selected heap oop
  // representation is narrowOop, and the passed-in type T is the (uncompressed)
  // oop type.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public IntegralConstant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    IsSame<typename HeapOopType<decorators>::type, narrowOop>::value &&
    IsSame<T, oop>::value> {};

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    typedef typename Conditional<
      HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
      typename HeapOopType<decorators>::type, T>::type type;
  };

  // Computes the in-heap address of an oop field, typed as the representation
  // (oop* or narrowOop*) selected by the decorators.
  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // This metafunction returns whether it is possible for a type T to require
  // locking to support wide atomics or not.
  template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
  struct PossiblyLockedAccess: public IntegralConstant<bool, false> {};
#else
  // Without native 8-byte compare-and-exchange, accesses wider than 4 bytes
  // may have to fall back to a lock-based path.
  struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)> {};
#endif

  // Function pointer signatures for each access primitive, used by the
  // runtime-dispatch machinery (RuntimeDispatch) further down.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
    typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
    typedef T (*atomic_xchg_func_t)(T new_value, void* addr);

    typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef void (*value_copy_func_t)(void* src, void* dst, ValueKlass* md);
    typedef oop (*resolve_func_t)(oop obj);
  };

  // Specialization for T = void: only an untyped arraycopy is expressible.
  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef void (*arraycopy_func_t)(arrayOop src_obj, size_t src_offset_in_bytes, void* src,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst,
                                     size_t length);
  };

  // Maps a BarrierType to the matching function pointer type from
  // AccessFunctionTypes; one explicit specialization per barrier is generated
  // by the macro below.
  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic{              \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_VALUE_COPY, value_copy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION

  // Resolve the concrete accessor function for a given decorator set and
  // barrier type; declared here, defined out of line. resolve_oop_barrier is
  // the variant for oop values.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // RAII scope guard for the lock used by the *_locked wide-atomic fallback
  // paths (see RawAccessBarrier::atomic_*_maybe_locked).
  class AccessLocker {
  public:
    AccessLocker();
    ~AccessLocker();
  };
  bool wide_atomic_needs_locking();

  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently included access is.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
}

// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | IS_NOT_NULL;

// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw"
// strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  // (i.e. compress oop -> narrowOop); defined out of line.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  // No conversion needed: pass the value through unchanged.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  // (i.e. decompress narrowOop -> oop); defined out of line.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  // No conversion needed: pass the value through unchanged.
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }

protected:
  // load_internal: one overload per memory-ordering decorator. The ordered
  // variants (MO_SEQ_CST, MO_ACQUIRE, MO_RELAXED) are declared here and
  // defined out of line; MO_VOLATILE and MO_UNORDERED are inline below.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const volatile T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<T*>(addr);
  }

  // store_internal: same per-ordering overload scheme as load_internal.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value>::type
  store_internal(void* addr, T value) {
    // Cast away the volatility of the assignment result to silence warnings
    // about reading a volatile lvalue; only the store itself is wanted.
    (void)const_cast<T&>(*reinterpret_cast<volatile T*>(addr) = value);
  }

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  // atomic_cmpxchg_internal / atomic_xchg_internal: per-ordering overloads,
  // defined out of line.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(T new_value, void* addr);

  // The following *_locked mechanisms serve the purpose of handling atomic operations
  // that are larger than a machine can handle, and then possibly opt for using
  // a slower path using a mutex to perform the operation.

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr) {
    return atomic_xchg_internal<ds>(new_value, addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr);

public:
  // Public raw access entry points: each forwards to the *_internal overload
  // selected by the class's memory-ordering decorators.
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(new_value, addr, compare_value);
  }

  template <typename T>
  static inline T atomic_xchg(T new_value, void* addr) {
    return atomic_xchg_maybe_locked<decorators>(new_value, addr);
  }

  template <typename T>
  static void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length);

  // oop_* variants: like the primitives above but with compressed-oop
  // encode/decode applied; declared here, defined out of line.
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

  template <typename T>
  static T oop_atomic_xchg(T new_value, void* addr);
  template <typename T>
  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);

  // *_at variants: compute the field address from (base, offset) and forward
  // to the address-based primitive.
  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
  }

  template <typename T>
  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    return atomic_xchg(new_value, field_addr(base, offset));
  }

  template <typename T>
  static void oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                            size_t length);

  static void clone(oop src, oop dst, size_t size);

  static void value_copy(void* src, void* dst, ValueKlass* md);

  // Raw accesses have nothing to resolve: the object is returned as-is.
  static oop resolve(oop obj) { return obj; }
};

// Below is the implementation of the first 4 steps of the template pipeline:
// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
//           and sets default decorators to sensible values.
// * Step 2: Reduce types. This step makes sure there is only a single T type and not
//           multiple types. The P type of the address and T type of the value must
//           match.
// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
//           avoided, and in that case avoids it (calling raw accesses or
//           primitive accesses in a build that does not require primitive GC barriers)
// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
//           to the access.

namespace AccessInternal {
  // Default: any oop-like type canonicalizes to oop ...
  template <typename T>
  struct OopOrNarrowOopInternal: AllStatic {
    typedef oop type;
  };

  // ... except narrowOop, which stays narrowOop.
  template <>
  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
    typedef narrowOop type;
  };

  // This metafunction returns a canonicalized oop/narrowOop type for a passed
  // in oop-like types passed in from oop_* overloads where the user has sworn
  // that the passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOop, instanceOopDesc*, and random other things).
  // In the oop_* overloads, it must hold that if the passed in type T is not
  // narrowOop, then it by contract has to be one of many oop-like types implicitly
  // convertible to oop, and hence returns oop as the canonical oop type.
  // If it turns out it was not, then the implicit conversion to oop will fail
  // to compile, as desired.
  template <typename T>
  struct OopOrNarrowOop: AllStatic {
    // Decay strips CV qualifiers and references before canonicalization.
    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
  };

  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // Step 4: Runtime dispatch
  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
  // accessor. This is required when the access either depends on whether compressed oops
  // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
  // barriers). The way it works is that a function pointer initially pointing to an
  // accessor resolution function gets called for each access. Upon first invocation,
  // it resolves which accessor to be used in future invocations and patches the
  // function pointer to this new accessor.

  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};

  // Each specialization below holds one static function pointer per
  // (decorators, T, barrier) triple. The *_init member is the resolution
  // function the pointer initially refers to (see the initializers further
  // down); the inline member is the fast-path call through the pointer.
  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    static func_t _store_func;

    static void store_init(void* addr, T value);

    static inline void store(void* addr, T value) {
      _store_func(addr, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    static func_t _store_at_func;

    static void store_at_init(oop base, ptrdiff_t offset, T value);

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      _store_at_func(base, offset, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    static func_t _load_func;

    static T load_init(void* addr);

    static inline T load(void* addr) {
      return _load_func(addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset);

    static inline T load_at(oop base, ptrdiff_t offset) {
      return _load_at_func(base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value);

    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      return _atomic_cmpxchg_func(new_value, addr, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value);

    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(T new_value, void* addr);

    static inline T atomic_xchg(T new_value, void* addr) {
      return _atomic_xchg_func(new_value, addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset);

    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      return _atomic_xchg_at_func(new_value, base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    static func_t _arraycopy_func;

    static void arraycopy_init(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                               arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                               size_t length);

    static inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                 size_t length) {
      return _arraycopy_func(src_obj, src_offset_in_bytes, src_raw,
                             dst_obj, dst_offset_in_bytes, dst_raw,
                             length);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    static func_t _clone_func;

    static void clone_init(oop src, oop dst, size_t size);

    static inline void clone(oop src, oop dst, size_t size) {
      _clone_func(src, dst, size);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type func_t;
    static func_t _value_copy_func;

    static void value_copy_init(void* src, void* dst, ValueKlass* md);

    static inline void value_copy(void* src, void* dst, ValueKlass* md) {
      _value_copy_func(src, dst, md);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
    static func_t _resolve_func;

    static oop resolve_init(oop obj);

    static inline oop resolve(oop obj) {
      return _resolve_func(obj);
    }
  };

  // Initialize the function pointers to point to the resolving function.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_VALUE_COPY>::type
  RuntimeDispatch<decorators, T, BARRIER_VALUE_COPY>::_value_copy_func = &value_copy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;

  // Step 3: Pre-runtime dispatching.
  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
  // not possible.
682 struct PreRuntimeDispatch: AllStatic { 683 template<DecoratorSet decorators> 684 struct CanHardwireRaw: public IntegralConstant< 685 bool, 686 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access 687 !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address) 688 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address) 689 {}; 690 691 static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP; 692 693 template<DecoratorSet decorators> 694 static bool is_hardwired_primitive() { 695 return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value && 696 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value; 697 } 698 699 template <DecoratorSet decorators, typename T> 700 inline static typename EnableIf< 701 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type 702 store(void* addr, T value) { 703 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 704 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 705 Raw::oop_store(addr, value); 706 } else { 707 Raw::store(addr, value); 708 } 709 } 710 711 template <DecoratorSet decorators, typename T> 712 inline static typename EnableIf< 713 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type 714 store(void* addr, T value) { 715 if (UseCompressedOops) { 716 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 717 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 718 } else { 719 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 720 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 721 } 722 } 723 724 template <DecoratorSet decorators, typename T> 725 inline static typename EnableIf< 726 !HasDecorator<decorators, AS_RAW>::value>::type 727 
    store(void* addr, T value) {
      // Non-raw store: accesses whose primitive-ness is statically known can
      // be hardwired straight to the raw path; everything else goes through
      // the runtime-dispatched (barrier-set aware) path.
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
      }
    }

    // AS_RAW store_at: compute the field address and delegate to store().
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }

    // Non-raw store_at: hardwire primitives to the raw path, otherwise
    // dispatch to the barrier set at runtime.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
      }
    }

    // AS_RAW load that can be hardwired: select the raw oop accessor or the
    // raw primitive accessor statically.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::template oop_load<T>(addr);
      } else {
        return Raw::template load<T>(addr);
      }
    }

    // AS_RAW load that cannot be hardwired: whether oops are compressed is
    // only known at runtime, so test UseCompressedOops and re-dispatch with
    // the compressed-oop conversion decorator resolved either way.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      }
    }

    // Non-raw load: hardwire primitives, otherwise runtime-dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load(void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
      }
    }

    // AS_RAW load_at: compute the field address and delegate to load().
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }

    // Non-raw load_at: hardwire primitives, otherwise runtime-dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
      }
    }

    // AS_RAW atomic_cmpxchg that can be hardwired: select the raw oop or
    // raw primitive accessor statically.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
      } else {
        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
      }
    }

    // AS_RAW atomic_cmpxchg that cannot be hardwired: compressed-oop use is
    // resolved at runtime (body continues below).
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(T
      new_value, void* addr, T compare_value) {
      // Whether oops are compressed is only known at runtime: test
      // UseCompressedOops and re-dispatch with the conversion decorator
      // resolved either way.
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      }
    }

    // Non-raw atomic_cmpxchg: hardwire primitives to the raw path, otherwise
    // dispatch to the barrier set at runtime.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
      }
    }

    // AS_RAW atomic_cmpxchg_at: compute the field address and delegate to
    // the address-based atomic_cmpxchg().
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
    }

    // Non-raw atomic_cmpxchg_at: hardwire primitives, otherwise
    // runtime-dispatch.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
      }
    }

    template <DecoratorSet
decorators, typename T> 868 inline static typename EnableIf< 869 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type 870 atomic_xchg(T new_value, void* addr) { 871 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 872 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 873 return Raw::oop_atomic_xchg(new_value, addr); 874 } else { 875 return Raw::atomic_xchg(new_value, addr); 876 } 877 } 878 879 template <DecoratorSet decorators, typename T> 880 inline static typename EnableIf< 881 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type 882 atomic_xchg(T new_value, void* addr) { 883 if (UseCompressedOops) { 884 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 885 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 886 } else { 887 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 888 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 889 } 890 } 891 892 template <DecoratorSet decorators, typename T> 893 inline static typename EnableIf< 894 !HasDecorator<decorators, AS_RAW>::value, T>::type 895 atomic_xchg(T new_value, void* addr) { 896 if (is_hardwired_primitive<decorators>()) { 897 const DecoratorSet expanded_decorators = decorators | AS_RAW; 898 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 899 } else { 900 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr); 901 } 902 } 903 904 template <DecoratorSet decorators, typename T> 905 inline static typename EnableIf< 906 HasDecorator<decorators, AS_RAW>::value, T>::type 907 atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) { 908 return atomic_xchg<decorators>(new_value, field_addr(base, offset)); 909 } 910 911 template <DecoratorSet decorators, typename T> 912 inline static typename EnableIf< 913 !HasDecorator<decorators, AS_RAW>::value, T>::type 
914 atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) { 915 if (is_hardwired_primitive<decorators>()) { 916 const DecoratorSet expanded_decorators = decorators | AS_RAW; 917 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, base, offset); 918 } else { 919 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset); 920 } 921 } 922 923 template <DecoratorSet decorators, typename T> 924 inline static typename EnableIf< 925 HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, void>::type 926 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, 927 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, 928 size_t length) { 929 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 930 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 931 Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw, 932 dst_obj, dst_offset_in_bytes, dst_raw, 933 length); 934 } else { 935 Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw, 936 dst_obj, dst_offset_in_bytes, dst_raw, 937 length); 938 } 939 } 940 941 template <DecoratorSet decorators, typename T> 942 inline static typename EnableIf< 943 HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, void>::type 944 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, 945 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, 946 size_t length) { 947 if (UseCompressedOops) { 948 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 949 PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw, 950 dst_obj, dst_offset_in_bytes, dst_raw, 951 length); 952 } else { 953 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 954 PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw, 955 dst_obj, dst_offset_in_bytes, dst_raw, 956 length); 957 } 958 } 959 
    // Non-raw arraycopy: hardwire primitives to the raw path, otherwise
    // dispatch to the barrier set at runtime.
    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, void>::type
    arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
              arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
              size_t length) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                           dst_obj, dst_offset_in_bytes, dst_raw,
                                                           length);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, src_offset_in_bytes, src_raw,
                                                                     dst_obj, dst_offset_in_bytes, dst_raw,
                                                                     length);
      }
    }

    // AS_RAW clone: delegate to the raw barrier's clone.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      Raw::clone(src, dst, size);
    }

    // Non-raw clone: always runtime-dispatched.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
    }

    // AS_RAW value_copy: raw copy of a value described by md.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    value_copy(void* src, void* dst, ValueKlass* md) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      Raw::value_copy(src, dst, md);
    }

    // Non-raw value_copy: runtime-dispatched.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    value_copy(void* src, void* dst, ValueKlass* md) {
      const DecoratorSet expanded_decorators = decorators;
      RuntimeDispatch<expanded_decorators, void*, BARRIER_VALUE_COPY>::value_copy(src, dst, md);
    }


    template <DecoratorSet decorators>
    inline
    static typename EnableIf<
      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
    resolve(oop obj) {
      // With a to-space invariant the raw resolve is sufficient.
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      return Raw::resolve(obj);
    }

    // Without a to-space invariant, resolution must be runtime-dispatched.
    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
    resolve(oop obj) {
      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
    }
  };

  // Step 2: Reduce types.
  // Enforce that for non-oop types, T and P have to be strictly the same.
  // P is the type of the address and T is the type of the values.
  // As for oop types, it is allowed to send T in {narrowOop, oop} and
  // P in {narrowOop, oop, HeapWord*}. The following rules apply according to
  // the subsequent table. (columns are P, rows are T)
  // |           | HeapWord | oop     | narrowOop |
  // | oop       | rt-comp  | hw-none | hw-comp   |
  // | narrowOop | x        | x       | hw-none   |
  //
  // x means not allowed
  // rt-comp means it must be checked at runtime whether the oop is compressed.
  // hw-none means it is statically known the oop will not be compressed.
  // hw-comp means it is statically known the oop will be compressed.

  // Address and value have the same type: no oop-type reduction needed.
  template <DecoratorSet decorators, typename T>
  inline void store_reduce_types(T* addr, T value) {
    PreRuntimeDispatch::store<decorators>(addr, value);
  }

  // oop value, narrowOop slot: "hw-comp" in the Step 2 table — compression
  // is statically known.
  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  // narrowOop value, narrowOop slot: "hw-none" in the Step 2 table.
  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  // oop value, HeapWord* slot: "rt-comp" — compression checked at runtime.
  template <DecoratorSet decorators>
  inline void store_reduce_types(HeapWord* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  // Same reduction scheme as the store_reduce_types overloads above.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(oop new_value,
                                         HeapWord* addr,
                                         oop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  // Same reduction scheme as the store_reduce_types overloads above.
  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
    const DecoratorSet expanded_decorators = decorators;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  // Same reduction scheme as the store_reduce_types overloads above; the
  // narrowOop overload additionally widens the requested T via OopOrNarrowOop.
  template <DecoratorSet decorators, typename T>
  inline T load_reduce_types(T* addr) {
    return PreRuntimeDispatch::load<decorators, T>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline oop load_reduce_types(HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
  }

  // Same reduction scheme as the store_reduce_types overloads above.
  template <DecoratorSet decorators, typename T>
  inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                     size_t length) {
    PreRuntimeDispatch::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                              dst_obj, dst_offset_in_bytes, dst_raw,
                                              length);
  }

  template <DecoratorSet decorators>
  inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, HeapWord* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, HeapWord* dst_raw,
                                     size_t length) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                       dst_obj, dst_offset_in_bytes, dst_raw,
                                                       length);
  }

  template <DecoratorSet decorators>
  inline void arraycopy_reduce_types(arrayOop src_obj, size_t src_offset_in_bytes, narrowOop* src_raw,
                                     arrayOop dst_obj, size_t dst_offset_in_bytes, narrowOop* dst_raw,
                                     size_t length) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                       dst_obj, dst_offset_in_bytes, dst_raw,
                                                       length);
  }

  // Step 1: Set default decorators. This step remembers if a type was volatile
  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
  // memory ordering is set for the access, and the implied decorator rules
  // are applied to select sensible defaults for decorators that have not been
  // explicitly set. For example, default object referent strength is set to strong.
  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
  // and references from the types). This step also performs some type verification
  // that the passed in types make sense.

  // Compile-time guard: only oops (validated earlier) and recognized
  // primitive types may reach the primitive Access functions.
  template <DecoratorSet decorators, typename T>
  static void verify_types(){
    // If this fails to compile, then you have sent in something that is
    // not recognized as a valid primitive type to a primitive Access function.
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                   (IsPointer<T>::value || IsIntegral<T>::value) ||
                    IsFloatingPoint<T>::value)); // not allowed primitive type
  }

  // Entry point for store: decays P/T, fixes up decorators and hands off to
  // the type-reduction step.
  template <DecoratorSet decorators, typename P, typename T>
  inline void store(P* addr, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_VOLATILE by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_VOLATILE | decorators) : decorators>::value;
    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
  }

  // Entry point for store_at: decays T, and for oop values remembers that
  // compressed-oop conversion may be required.
  template <DecoratorSet decorators, typename T>
  inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
  }

  // Entry point for load: oop-like values reduce T via OopOrNarrowOop,
  // others just decay.
  template <DecoratorSet decorators, typename P, typename T>
  inline T load(P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_VOLATILE by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_VOLATILE | decorators) : decorators>::value;
    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
  }

  // Entry point for load_at.
  template <DecoratorSet decorators, typename T>
  inline T load_at(oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // Expand the decorators (figure out sensible defaults)
    // Potentially remember if we need compressed oop awareness
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
  }

  // Entry point for atomic_cmpxchg: defaults to MO_SEQ_CST when no memory
  // ordering decorator was given.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                            const_cast<DecayedP*>(addr),
                                                            compare_decayed_value);
  }

  // Entry point for atomic_cmpxchg_at.
  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Determine default memory ordering
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    // Potentially remember that we need compressed oop awareness
    const DecoratorSet final_decorators = expanded_decorators |
                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                           INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE);
    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
                                                                   offset, compare_decayed_value);
  }

  // Entry point for atomic_xchg.
  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_xchg(T new_value, P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                         const_cast<DecayedP*>(addr));
  }

  // Entry point for atomic_xchg_at.
  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : DECORATORS_NONE)>::value;
    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
  }

  // Entry point for arraycopy: always performed with IS_ARRAY | IN_HEAP.
  template <DecoratorSet decorators, typename T>
  inline void arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                        size_t length) {
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
    typedef typename Decay<T>::type DecayedT;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IS_ARRAY | IN_HEAP>::value;
    arraycopy_reduce_types<expanded_decorators>(src_obj, src_offset_in_bytes, const_cast<DecayedT*>(src_raw),
                                                dst_obj, dst_offset_in_bytes, const_cast<DecayedT*>(dst_raw),
                                                length);
  }

  // Entry point for clone.
  template <DecoratorSet decorators>
  inline void clone(oop src, oop dst, size_t size) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
  }

  // Entry point for value_copy (continued below).
  template <DecoratorSet decorators>
  inline void value_copy(void* src, void*
  dst, ValueKlass* md) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::value_copy<expanded_decorators>(src, dst, md);
  }

  // Entry point for resolve.
  template <DecoratorSet decorators>
  inline oop resolve(oop obj) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
  }

  // Infer the type that should be returned from an Access::oop_load.
  // The proxy defers the actual load until the caller's context fixes the
  // destination type (oop or narrowOop) via the conversion operators.
  template <typename P, DecoratorSet decorators>
  class OopLoadProxy: public StackObj {
  private:
    P *const _addr;  // address the deferred load will read from
  public:
    OopLoadProxy(P* addr) : _addr(addr) {}

    inline operator oop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
    }

    inline operator narrowOop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
    }

    template <typename T>
    inline bool operator ==(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
    }
  };

  // Infer the type that should be returned from an Access::load_at.
  // The templated conversion operator performs the load with whatever T the
  // caller's context demands.
  template <DecoratorSet decorators>
  class LoadAtProxy: public StackObj {
  private:
    const oop _base;          // object holding the field
    const ptrdiff_t _offset;  // byte offset of the field within _base
  public:
    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    template <typename T>
    inline operator T() const {
      return load_at<decorators, T>(_base, _offset);
    }

    template <typename T>
    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

    template <typename T>
    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
  };

  // Infer the type that should be returned from an Access::oop_load_at.
  template <DecoratorSet decorators>
  class OopLoadAtProxy: public StackObj {
  private:
    const oop _base;          // object holding the field
    const ptrdiff_t _offset;  // byte offset of the field within _base
  public:
    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    inline operator oop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
    }

    inline operator narrowOop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
    }

    template <typename T>
    inline bool operator ==(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
    }
  };
}

#endif // SHARE_OOPS_ACCESSBACKEND_HPP