1 /* 2 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_RUNTIME_ACCESS_INLINE_HPP 26 #define SHARE_VM_RUNTIME_ACCESS_INLINE_HPP 27 28 #include "gc/shared/barrierSet.inline.hpp" 29 #include "metaprogramming/conditional.hpp" 30 #include "metaprogramming/isFloatingPoint.hpp" 31 #include "metaprogramming/isIntegral.hpp" 32 #include "metaprogramming/isPointer.hpp" 33 #include "metaprogramming/isVolatile.hpp" 34 #include "oops/access.hpp" 35 #include "oops/accessBackend.inline.hpp" 36 #include "runtime/atomic.hpp" 37 #include "runtime/orderAccess.inline.hpp" 38 39 // This file outlines the template pipeline of accesses going through the Access 40 // API. There are essentially 5 steps for each access. 41 // * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers 42 // and sets default decorators to sensible values. 43 // * Step 2: Reduce types. 
This step makes sure there is only a single T type and not 44 // multiple types. The P type of the address and T type of the value must 45 // match. 46 // * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be 47 // avoided, and in that case avoids it (calling raw accesses or 48 // primitive accesses in a build that does not require primitive GC barriers) 49 // * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding 50 // BarrierSet::AccessBarrier accessor that attaches GC-required barriers 51 // to the access. 52 // * Step 5: Post-runtime dispatch. This step now casts previously unknown types such 53 // as the address type of an oop on the heap (is it oop* or narrowOop*) to 54 // the appropriate type. It also splits sufficiently orthogonal accesses into 55 // different functions, such as whether the access involves oops or primitives 56 // and whether the access is performed on the heap or outside. Then the 57 // appropriate BarrierSet::AccessBarrier is called to perform the access. 58 59 namespace AccessInternal { 60 61 // Step 5: Post-runtime dispatch. 62 // This class is the last step before calling the BarrierSet::AccessBarrier. 63 // Here we make sure to figure out types that were not known prior to the 64 // runtime dispatch, such as whether an oop on the heap is oop or narrowOop. 65 // We also split orthogonal barriers such as handling primitives vs oops 66 // and on-heap vs off-heap into different calls to the barrier set. 
  // The primary template is intentionally empty; each BarrierType gets its own
  // partial specialization below that maps the access onto the GCBarrierType's
  // in-heap / not-in-heap entry points.
  template <class GCBarrierType, BarrierType type, DecoratorSet decorators>
  struct PostRuntimeDispatch: public AllStatic { };

  // BARRIER_STORE: plain store of a primitive value or an oop.
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE, decorators>: public AllStatic {
    // Primitive store: the address already has the right width; cast and store.
    template <typename T>
    static void access_barrier(void* addr, T value) {
      GCBarrierType::store_in_heap(reinterpret_cast<T*>(addr), value);
    }

    // Oop store: by this point the decorators statically determine whether the
    // slot is oop* or narrowOop* (HeapOopType picks it), and whether the slot
    // lives in the heap or outside it.
    static void oop_access_barrier(void* addr, oop value) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        GCBarrierType::oop_store_in_heap(reinterpret_cast<OopType*>(addr), value);
      } else {
        GCBarrierType::oop_store_not_in_heap(reinterpret_cast<OopType*>(addr), value);
      }
    }
  };

  // BARRIER_LOAD: plain load of a primitive value or an oop.
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(void* addr) {
      return GCBarrierType::load_in_heap(reinterpret_cast<T*>(addr));
    }

    static oop oop_access_barrier(void* addr) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_load_in_heap(reinterpret_cast<OopType*>(addr));
      } else {
        return GCBarrierType::oop_load_not_in_heap(reinterpret_cast<OopType*>(addr));
      }
    }
  };

  // BARRIER_ATOMIC_XCHG: atomic exchange at an untyped address.
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, void* addr) {
      return GCBarrierType::atomic_xchg_in_heap(new_value, reinterpret_cast<T*>(addr));
    }

    static oop oop_access_barrier(oop new_value, void* addr) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_atomic_xchg_in_heap(new_value, reinterpret_cast<OopType*>(addr));
      } else {
        return GCBarrierType::oop_atomic_xchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr));
      }
    }
  };

  // BARRIER_ATOMIC_CMPXCHG: atomic compare-and-exchange at an untyped address.
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, void* addr, T compare_value) {
      return GCBarrierType::atomic_cmpxchg_in_heap(new_value, reinterpret_cast<T*>(addr), compare_value);
    }

    static oop oop_access_barrier(oop new_value, void* addr, oop compare_value) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_atomic_cmpxchg_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
      } else {
        return GCBarrierType::oop_atomic_cmpxchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
      }
    }
  };

  // BARRIER_ARRAYCOPY: bulk copy between (possibly heap) arrays; returns
  // whether the copy completed (the barrier may reject e.g. a bad element).
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ARRAYCOPY, decorators>: public AllStatic {
    template <typename T>
    static bool access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      return GCBarrierType::arraycopy_in_heap(src_obj, dst_obj, src, dst, length);
    }

    template <typename T>
    static bool oop_access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      typedef typename HeapOopType<decorators>::type OopType;
      return GCBarrierType::oop_arraycopy_in_heap(src_obj, dst_obj,
                                                  reinterpret_cast<OopType*>(src),
                                                  reinterpret_cast<OopType*>(dst), length);
    }
  };

  // BARRIER_STORE_AT: store at (base oop, field offset); the barrier resolves
  // the actual field address itself, so no in/out-of-heap split is needed.
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE_AT, decorators>: public AllStatic {
    template <typename T>
    static void access_barrier(oop base, ptrdiff_t offset, T value) {
      GCBarrierType::store_in_heap_at(base, offset, value);
    }

    static void oop_access_barrier(oop base, ptrdiff_t offset, oop value) {
      GCBarrierType::oop_store_in_heap_at(base, offset, value);
    }
  };

  // BARRIER_LOAD_AT: load from (base oop, field offset).
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(oop base, ptrdiff_t offset) {
      // "template" keyword needed: load_in_heap_at is a dependent member template.
      return GCBarrierType::template load_in_heap_at<T>(base, offset);
    }

    static oop oop_access_barrier(oop base, ptrdiff_t offset) {
      return GCBarrierType::oop_load_in_heap_at(base, offset);
    }
  };

  // BARRIER_ATOMIC_XCHG_AT: atomic exchange at (base oop, field offset).
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, oop base, ptrdiff_t offset) {
      return GCBarrierType::atomic_xchg_in_heap_at(new_value, base, offset);
    }

    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset) {
      return GCBarrierType::oop_atomic_xchg_in_heap_at(new_value, base, offset);
    }
  };

  // BARRIER_ATOMIC_CMPXCHG_AT: atomic compare-and-exchange at (base, offset).
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return GCBarrierType::atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
    }

    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
      return GCBarrierType::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
    }
  };

  // BARRIER_CLONE: object clone; always an in-heap operation.
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_CLONE, decorators>: public AllStatic {
    static void access_barrier(oop src, oop dst, size_t size) {
      GCBarrierType::clone_in_heap(src, dst, size);
    }
  };

  // Resolving accessors with barriers from the barrier set happens in two steps.
  // 1. Expand paths with runtime-decorators, e.g. is UseCompressedOops on or off.
  // 2. Expand paths for each BarrierSet available in the system.
  template <DecoratorSet decorators, typename FunctionPointerT, BarrierType barrier_type>
  struct BarrierResolver: public AllStatic {
    // Oop variant: selected by SFINAE when the accessed value is an oop;
    // returns the oop_access_barrier of the matching PostRuntimeDispatch.
    template <DecoratorSet ds>
    static typename EnableIf<
      HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
      FunctionPointerT>::type
    resolve_barrier_gc() {
      BarrierSet* bs = BarrierSet::barrier_set();
      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
      switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
        case BarrierSet::bs_name: {                                     \
          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
            AccessBarrier<ds>, barrier_type, ds>::oop_access_barrier;   \
        }                                                               \
        break;
        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE

      default:
        fatal("BarrierSet AccessBarrier resolving not implemented");
        return NULL;
      };
    }

    // Primitive variant: selected when the accessed value is not an oop;
    // returns the (templated) access_barrier instead.
    template <DecoratorSet ds>
    static typename EnableIf<
      !HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
      FunctionPointerT>::type
    resolve_barrier_gc() {
      BarrierSet* bs = BarrierSet::barrier_set();
      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
      switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
        case BarrierSet::bs_name: {                                     \
          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
            AccessBarrier<ds>, barrier_type, ds>::access_barrier;       \
        }                                                               \
        break;
        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE

      default:
        fatal("BarrierSet AccessBarrier resolving not implemented");
        return NULL;
      };
    }

    // Expands the runtime UseCompressedOops flag into a compile-time decorator
    // before delegating to the GC-specific resolution above.
    static FunctionPointerT resolve_barrier_rt() {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | INTERNAL_RT_USE_COMPRESSED_OOPS;
        return resolve_barrier_gc<expanded_decorators>();
      } else {
        return resolve_barrier_gc<decorators>();
      }
    }

    static FunctionPointerT resolve_barrier() {
      return resolve_barrier_rt();
    }
  };

  // Step 4: Runtime dispatch
  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
  // accessor. This is required when the access either depends on whether compressed oops
  // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
  // barriers). The way it works is that a function pointer initially pointing to an
  // accessor resolution function gets called for each access. Upon first invocation,
  // it resolves which accessor to be used in future invocations and patches the
  // function pointer to this new accessor.
  // Primary template is empty; one partial specialization per BarrierType
  // holds a self-patching function pointer (see Step 4 comment above).
  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    static func_t _store_func;

    // First-call resolver: resolves the real accessor, patches _store_func so
    // subsequent calls go straight to it, then performs this first access.
    static void store_init(void* addr, T value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
      _store_func = function;
      function(addr, value);
    }

    static inline void store(void* addr, T value) {
      _store_func(addr, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    static func_t _store_at_func;

    // First-call resolver; patches _store_at_func then performs the access.
    static void store_at_init(oop base, ptrdiff_t offset, T value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
      _store_at_func = function;
      function(base, offset, value);
    }

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      _store_at_func(base, offset, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    static func_t _load_func;

    // First-call resolver; patches _load_func then performs the access.
    static T load_init(void* addr) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
      _load_func = function;
      return function(addr);
    }

    static inline T load(void* addr) {
      return _load_func(addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    static func_t _load_at_func;

    // First-call resolver; patches _load_at_func then performs the access.
    static T load_at_init(oop base, ptrdiff_t offset) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
      _load_at_func = function;
      return function(base, offset);
    }

    static inline T load_at(oop base, ptrdiff_t offset) {
      return _load_at_func(base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    static func_t _atomic_cmpxchg_func;

    // First-call resolver; patches _atomic_cmpxchg_func then performs the access.
    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
      _atomic_cmpxchg_func = function;
      return function(new_value, addr, compare_value);
    }

    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      return _atomic_cmpxchg_func(new_value, addr, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    static func_t _atomic_cmpxchg_at_func;

    // First-call resolver; patches _atomic_cmpxchg_at_func then performs the access.
    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
      _atomic_cmpxchg_at_func = function;
      return function(new_value, base, offset, compare_value);
    }

    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    static func_t _atomic_xchg_func;

    // First-call resolver; patches _atomic_xchg_func then performs the access.
    static T atomic_xchg_init(T new_value, void* addr) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
      _atomic_xchg_func = function;
      return function(new_value, addr);
    }

    static inline T atomic_xchg(T new_value, void* addr) {
      return _atomic_xchg_func(new_value, addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    static func_t _atomic_xchg_at_func;

    // First-call resolver; patches _atomic_xchg_at_func then performs the access.
    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
      _atomic_xchg_at_func = function;
      return function(new_value, base, offset);
    }

    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      return _atomic_xchg_at_func(new_value, base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    static func_t _arraycopy_func;

    // First-call resolver; patches _arraycopy_func then performs the access.
    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
      _arraycopy_func = function;
      return function(src_obj, dst_obj, src, dst, length);
    }

    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    static func_t _clone_func;

    // First-call resolver; patches _clone_func then performs the access.
    static void clone_init(oop src, oop dst, size_t size) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
      _clone_func = function;
      function(src, dst, size);
    }

    static inline void clone(oop src, oop dst, size_t size) {
      _clone_func(src, dst, size);
    }
  };

  // Initialize the function pointers to point to the resolving function.
  // Each pointer starts at its *_init resolver and is patched on first use.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

  // Step 3: Pre-runtime dispatching.
  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
  // dispatch point. Otherwise it goes through a runtime check if hardwiring was
  // not possible.
492 struct PreRuntimeDispatch: AllStatic { 493 template<DecoratorSet decorators> 494 static bool can_hardwire_raw() { 495 return !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access 496 !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address) 497 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value; // we can infer we use compressed oops (narrowOop* address) 498 } 499 500 static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP; 501 502 template<DecoratorSet decorators> 503 static bool is_hardwired_primitive() { 504 return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value && 505 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value; 506 } 507 508 template <DecoratorSet decorators, typename T> 509 inline static typename EnableIf< 510 HasDecorator<decorators, AS_RAW>::value>::type 511 store(void* addr, T value) { 512 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 513 if (can_hardwire_raw<decorators>()) { 514 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 515 Raw::oop_store(addr, value); 516 } else { 517 Raw::store(addr, value); 518 } 519 } else if (UseCompressedOops) { 520 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 521 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 522 } else { 523 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 524 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 525 } 526 } 527 528 template <DecoratorSet decorators, typename T> 529 inline static typename EnableIf< 530 !HasDecorator<decorators, AS_RAW>::value>::type 531 store(void* addr, T value) { 532 if (is_hardwired_primitive<decorators>()) { 533 const DecoratorSet expanded_decorators = decorators | AS_RAW; 534 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 535 } else { 536 
RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value); 537 } 538 } 539 540 template <DecoratorSet decorators, typename T> 541 inline static typename EnableIf< 542 HasDecorator<decorators, AS_RAW>::value>::type 543 store_at(oop base, ptrdiff_t offset, T value) { 544 store<decorators>(field_addr(base, offset), value); 545 } 546 547 template <DecoratorSet decorators, typename T> 548 inline static typename EnableIf< 549 !HasDecorator<decorators, AS_RAW>::value>::type 550 store_at(oop base, ptrdiff_t offset, T value) { 551 if (is_hardwired_primitive<decorators>()) { 552 const DecoratorSet expanded_decorators = decorators | AS_RAW; 553 PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value); 554 } else { 555 RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value); 556 } 557 } 558 559 template <DecoratorSet decorators, typename T> 560 inline static typename EnableIf< 561 HasDecorator<decorators, AS_RAW>::value, T>::type 562 load(void* addr) { 563 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 564 if (can_hardwire_raw<decorators>()) { 565 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 566 return Raw::template oop_load<T>(addr); 567 } else { 568 return Raw::template load<T>(addr); 569 } 570 } else if (UseCompressedOops) { 571 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 572 return PreRuntimeDispatch::load<expanded_decorators, T>(addr); 573 } else { 574 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 575 return PreRuntimeDispatch::load<expanded_decorators, T>(addr); 576 } 577 } 578 579 template <DecoratorSet decorators, typename T> 580 inline static typename EnableIf< 581 !HasDecorator<decorators, AS_RAW>::value, T>::type 582 load(void* addr) { 583 if (is_hardwired_primitive<decorators>()) { 584 const DecoratorSet expanded_decorators = decorators | AS_RAW; 585 return PreRuntimeDispatch::load<expanded_decorators, T>(addr); 586 } 
else { 587 return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr); 588 } 589 } 590 591 template <DecoratorSet decorators, typename T> 592 inline static typename EnableIf< 593 HasDecorator<decorators, AS_RAW>::value, T>::type 594 load_at(oop base, ptrdiff_t offset) { 595 return load<decorators, T>(field_addr(base, offset)); 596 } 597 598 template <DecoratorSet decorators, typename T> 599 inline static typename EnableIf< 600 !HasDecorator<decorators, AS_RAW>::value, T>::type 601 load_at(oop base, ptrdiff_t offset) { 602 if (is_hardwired_primitive<decorators>()) { 603 const DecoratorSet expanded_decorators = decorators | AS_RAW; 604 return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset); 605 } else { 606 return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset); 607 } 608 } 609 610 template <DecoratorSet decorators, typename T> 611 inline static typename EnableIf< 612 HasDecorator<decorators, AS_RAW>::value, T>::type 613 atomic_cmpxchg(T new_value, void* addr, T compare_value) { 614 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 615 if (can_hardwire_raw<decorators>()) { 616 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 617 return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value); 618 } else { 619 return Raw::atomic_cmpxchg(new_value, addr, compare_value); 620 } 621 } else if (UseCompressedOops) { 622 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 623 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 624 } else { 625 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 626 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 627 } 628 } 629 630 template <DecoratorSet decorators, typename T> 631 inline static typename EnableIf< 632 !HasDecorator<decorators, AS_RAW>::value, T>::type 633 atomic_cmpxchg(T new_value, void* addr, T compare_value) 
{ 634 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 635 if (is_hardwired_primitive<decorators>()) { 636 const DecoratorSet expanded_decorators = decorators | AS_RAW; 637 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 638 } else { 639 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value); 640 } 641 } 642 643 template <DecoratorSet decorators, typename T> 644 inline static typename EnableIf< 645 HasDecorator<decorators, AS_RAW>::value, T>::type 646 atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) { 647 return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value); 648 } 649 650 template <DecoratorSet decorators, typename T> 651 inline static typename EnableIf< 652 !HasDecorator<decorators, AS_RAW>::value, T>::type 653 atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) { 654 if (is_hardwired_primitive<decorators>()) { 655 const DecoratorSet expanded_decorators = decorators | AS_RAW; 656 return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value); 657 } else { 658 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value); 659 } 660 } 661 662 template <DecoratorSet decorators, typename T> 663 inline static typename EnableIf< 664 HasDecorator<decorators, AS_RAW>::value, T>::type 665 atomic_xchg(T new_value, void* addr) { 666 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 667 if (can_hardwire_raw<decorators>()) { 668 if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) { 669 return Raw::oop_atomic_xchg(new_value, addr); 670 } else { 671 return Raw::atomic_xchg(new_value, addr); 672 } 673 } else if (UseCompressedOops) { 674 const DecoratorSet expanded_decorators = decorators | convert_compressed_oops; 675 return 
PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 676 } else { 677 const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops; 678 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 679 } 680 } 681 682 template <DecoratorSet decorators, typename T> 683 inline static typename EnableIf< 684 !HasDecorator<decorators, AS_RAW>::value, T>::type 685 atomic_xchg(T new_value, void* addr) { 686 if (is_hardwired_primitive<decorators>()) { 687 const DecoratorSet expanded_decorators = decorators | AS_RAW; 688 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 689 } else { 690 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr); 691 } 692 } 693 694 template <DecoratorSet decorators, typename T> 695 inline static typename EnableIf< 696 HasDecorator<decorators, AS_RAW>::value, T>::type 697 atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) { 698 return atomic_xchg<decorators>(new_value, field_addr(base, offset)); 699 } 700 701 template <DecoratorSet decorators, typename T> 702 inline static typename EnableIf< 703 !HasDecorator<decorators, AS_RAW>::value, T>::type 704 atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) { 705 if (is_hardwired_primitive<decorators>()) { 706 const DecoratorSet expanded_decorators = decorators | AS_RAW; 707 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, base, offset); 708 } else { 709 return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset); 710 } 711 } 712 713 template <DecoratorSet decorators, typename T> 714 inline static typename EnableIf< 715 HasDecorator<decorators, AS_RAW>::value, bool>::type 716 arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) { 717 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 718 return Raw::arraycopy(src, dst, length); 719 } 720 721 template <DecoratorSet decorators, 
typename T> 722 inline static typename EnableIf< 723 !HasDecorator<decorators, AS_RAW>::value, bool>::type 724 arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) { 725 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 726 if (is_hardwired_primitive<decorators>()) { 727 const DecoratorSet expanded_decorators = decorators | AS_RAW; 728 return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length); 729 } else { 730 return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length); 731 } 732 } 733 734 template <DecoratorSet decorators> 735 inline static typename EnableIf< 736 HasDecorator<decorators, AS_RAW>::value>::type 737 clone(oop src, oop dst, size_t size) { 738 typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw; 739 Raw::clone(src, dst, size); 740 } 741 742 template <DecoratorSet decorators> 743 inline static typename EnableIf< 744 !HasDecorator<decorators, AS_RAW>::value>::type 745 clone(oop src, oop dst, size_t size) { 746 RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size); 747 } 748 }; 749 750 // This class adds implied decorators that follow according to decorator rules. 751 // For example adding default reference strength and default memory ordering 752 // semantics. 753 template <DecoratorSet input_decorators> 754 struct DecoratorFixup: AllStatic { 755 // If no reference strength has been picked, then strong will be picked 756 static const DecoratorSet ref_strength_default = input_decorators | 757 (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ? 758 ON_STRONG_OOP_REF : INTERNAL_EMPTY); 759 // If no memory ordering has been picked, unordered will be picked 760 static const DecoratorSet memory_ordering_default = ref_strength_default | 761 ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? 
MO_UNORDERED : INTERNAL_EMPTY); 762 // If no barrier strength has been picked, normal will be used 763 static const DecoratorSet barrier_strength_default = memory_ordering_default | 764 ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY); 765 // Heap array accesses imply it is a heap access 766 static const DecoratorSet heap_array_is_in_heap = barrier_strength_default | 767 ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY); 768 static const DecoratorSet conc_root_is_root = heap_array_is_in_heap | 769 ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY); 770 static const DecoratorSet archive_root_is_root = conc_root_is_root | 771 ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY); 772 static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS; 773 }; 774 775 // Step 2: Reduce types. 776 // Enforce that for non-oop types, T and P have to be strictly the same. 777 // P is the type of the address and T is the type of the values. 778 // As for oop types, it is allow to send T in {narrowOop, oop} and 779 // P in {narrowOop, oop, HeapWord*}. The following rules apply according to 780 // the subsequent table. (columns are P, rows are T) 781 // | | HeapWord | oop | narrowOop | 782 // | oop | rt-comp | hw-none | hw-comp | 783 // | narrowOop | x | x | hw-none | 784 // 785 // x means not allowed 786 // rt-comp means it must be checked at runtime whether the oop is compressed. 787 // hw-none means it is statically known the oop will not be compressed. 788 // hw-comp means it is statically known the oop will be compressed. 
789 790 template <DecoratorSet decorators, typename T> 791 inline void store_reduce_types(T* addr, T value) { 792 PreRuntimeDispatch::store<decorators>(addr, value); 793 } 794 795 template <DecoratorSet decorators> 796 inline void store_reduce_types(narrowOop* addr, oop value) { 797 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 798 INTERNAL_RT_USE_COMPRESSED_OOPS; 799 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 800 } 801 802 template <DecoratorSet decorators> 803 inline void store_reduce_types(HeapWord* addr, oop value) { 804 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP; 805 PreRuntimeDispatch::store<expanded_decorators>(addr, value); 806 } 807 808 template <DecoratorSet decorators, typename T> 809 inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) { 810 return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value); 811 } 812 813 template <DecoratorSet decorators> 814 inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) { 815 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 816 INTERNAL_RT_USE_COMPRESSED_OOPS; 817 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 818 } 819 820 template <DecoratorSet decorators> 821 inline oop atomic_cmpxchg_reduce_types(oop new_value, HeapWord* addr, oop compare_value) { 822 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP; 823 return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value); 824 } 825 826 template <DecoratorSet decorators, typename T> 827 inline T atomic_xchg_reduce_types(T new_value, T* addr) { 828 const DecoratorSet expanded_decorators = decorators; 829 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 830 } 831 832 template <DecoratorSet decorators> 
833 inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) { 834 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | 835 INTERNAL_RT_USE_COMPRESSED_OOPS; 836 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 837 } 838 839 template <DecoratorSet decorators> 840 inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) { 841 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP; 842 return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr); 843 } 844 845 template <DecoratorSet decorators, typename T> 846 inline T load_reduce_types(T* addr) { 847 return PreRuntimeDispatch::load<decorators, T>(addr); 848 } 849 850 template <DecoratorSet decorators, typename T> 851 inline oop load_reduce_types(narrowOop* addr) { 852 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_RT_USE_COMPRESSED_OOPS; 853 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr); 854 } 855 856 template <DecoratorSet decorators, typename T> 857 inline oop load_reduce_types(HeapWord* addr) { 858 const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP; 859 return PreRuntimeDispatch::load<expanded_decorators, oop>(addr); 860 } 861 862 // Step 1: Set default decorators. This step remembers if a type was volatile 863 // and then sets the MO_VOLATILE decorator by default. Otherwise, a default 864 // memory ordering is set for the access, and the implied decorator rules 865 // are applied to select sensible defaults for decorators that have not been 866 // explicitly set. For example, default object referent strength is set to strong. 867 // This step also decays the types passed in (e.g. getting rid of CV qualifiers 868 // and references from the types). This step also perform some type verification 869 // that the passed in types make sense. 
// Compile-time guard: rejects value types that the primitive Access API does
// not accept. Oop accesses are exempt (validated earlier in the pipeline).
template <DecoratorSet decorators, typename T>
static void verify_types(){
  // If this fails to compile, then you have sent in something that is
  // not recognized as a valid primitive type to a primitive Access function.
  STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                 (IsPointer<T>::value || IsIntegral<T>::value) ||
                  IsFloatingPoint<T>::value)); // not allowed primitive type
}

// Step 1 store: decays P/T, applies default decorators, then reduces types.
template <DecoratorSet decorators, typename P, typename T>
inline void store(P* addr, T value) {
  verify_types<decorators, T>();
  typedef typename Decay<P>::type DecayedP;
  typedef typename Decay<T>::type DecayedT;
  DecayedT decayed_value = value;
  // If a volatile address is passed in but no memory ordering decorator,
  // set the memory ordering to MO_VOLATILE by default.
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_VOLATILE | decorators) : decorators>::value;
  store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
}

// Step 1 store at a (base, offset): oop values get the compressed-oop
// conversion decorator so the address type can be resolved later.
template <DecoratorSet decorators, typename T>
inline void store_at(oop base, ptrdiff_t offset, T value) {
  verify_types<decorators, T>();
  typedef typename Decay<T>::type DecayedT;
  DecayedT decayed_value = value;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
    (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
     INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
  PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
}

// Step 1 load: for oop accesses the loaded type may be oop or narrowOop
// (OopOrNarrowOop), which is resolved further down the pipeline.
template <DecoratorSet decorators, typename P, typename T>
inline T load(P* addr) {
  verify_types<decorators, T>();
  typedef typename Decay<P>::type DecayedP;
  typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                               typename OopOrNarrowOop<T>::type,
                               typename Decay<T>::type>::type DecayedT;
  // If a volatile address is passed in but no memory ordering decorator,
  // set the memory ordering to MO_VOLATILE by default.
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_VOLATILE | decorators) : decorators>::value;
  return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
}

// Step 1 load at a (base, offset).
template <DecoratorSet decorators, typename T>
inline T load_at(oop base, ptrdiff_t offset) {
  verify_types<decorators, T>();
  typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                               typename OopOrNarrowOop<T>::type,
                               typename Decay<T>::type>::type DecayedT;
  // Expand the decorators (figure out sensible defaults)
  // Potentially remember if we need compressed oop awareness
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
    (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
     INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
  return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
}

// Step 1 compare-and-exchange: defaults to MO_SEQ_CST when no memory
// ordering decorator was given.
template <DecoratorSet decorators, typename P, typename T>
inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
  verify_types<decorators, T>();
  typedef typename Decay<P>::type DecayedP;
  typedef typename Decay<T>::type DecayedT;
  DecayedT new_decayed_value = new_value;
  DecayedT compare_decayed_value = compare_value;
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_SEQ_CST | decorators) : decorators>::value;
  return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                          const_cast<DecayedP*>(addr),
                                                          compare_decayed_value);
}

// Step 1 compare-and-exchange at a (base, offset).
template <DecoratorSet decorators, typename T>
inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
  verify_types<decorators, T>();
  typedef typename Decay<T>::type DecayedT;
  DecayedT new_decayed_value = new_value;
  DecayedT compare_decayed_value = compare_value;
  // Determine default memory ordering
  const DecoratorSet expanded_decorators = DecoratorFixup<
    (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
    (MO_SEQ_CST | decorators) : decorators>::value;
  // Potentially remember that we need compressed oop awareness
  const DecoratorSet final_decorators = expanded_decorators |
    (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
     INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
  return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
                                                                 offset, compare_decayed_value);
}

// Step 1 exchange.
template <DecoratorSet decorators, typename P, typename T>
inline T atomic_xchg(T new_value, P* addr) {
  verify_types<decorators, T>();
  typedef typename Decay<P>::type DecayedP;
  typedef typename Decay<T>::type DecayedT;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
  return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                       const_cast<DecayedP*>(addr));
}

// Step 1 exchange at a (base, offset).
template <DecoratorSet decorators, typename T>
inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
  verify_types<decorators, T>();
  typedef typename Decay<T>::type DecayedT;
  DecayedT new_decayed_value = new_value;
  // atomic_xchg is only available in SEQ_CST flavour.
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
    (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
     INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
  return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
}

// Step 1 arraycopy: array accesses are always heap array accesses, so
// IN_HEAP_ARRAY | IN_HEAP are added unconditionally.
template <DecoratorSet decorators, typename T>
inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
  verify_types<decorators, T>();
  typedef typename Decay<T>::type DecayedT;
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP |
    (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
     INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
  return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj,
                                                            const_cast<DecayedT*>(src),
                                                            const_cast<DecayedT*>(dst),
                                                            length);
}

// Step 1 clone.
template <DecoratorSet decorators>
inline void clone(oop src, oop dst, size_t size) {
  const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
  PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
}
}

// Compile-time sanity checking of the decorator set passed to an Access
// call site: decorators must be a subset of what the call expects, and the
// decorators within each category must be mutually exclusive.
template <DecoratorSet decorators>
template <DecoratorSet expected_decorators>
void Access<decorators>::verify_decorators() {
  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
                (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
                (barrier_strength_decorators ^ AS_RAW) == 0 ||
                (barrier_strength_decorators ^ AS_NORMAL) == 0
                ));
  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
                (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
                (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
                (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
                (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
                ));
  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
                (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
                (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
                (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
                (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
                (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
                (memory_ordering_decorators ^ MO_SEQ_CST) == 0
                ));
  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
                (location_decorators ^ IN_ROOT) == 0 ||
                (location_decorators ^ IN_HEAP) == 0 ||
                (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
                (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
                (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
                ));
}

#endif // SHARE_VM_RUNTIME_ACCESS_INLINE_HPP