/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESS_INLINE_HPP
#define SHARE_OOPS_ACCESS_INLINE_HPP

#include "gc/shared/barrierSetConfig.inline.hpp"
#include "oops/access.hpp"
#include "oops/accessBackend.inline.hpp"

// This file outlines the last 2 steps of the template pipeline of accesses going through
// the Access API.
// * Step 5.a: Barrier resolution. This step is invoked the first time a runtime-dispatch
//             happens for an access. The appropriate BarrierSet::AccessBarrier accessor
//             is resolved, then the function pointer is updated to that accessor for
//             future invocations.
// * Step 5.b: Post-runtime dispatch. This step now casts previously unknown types such
//             as the address type of an oop on the heap (is it oop* or narrowOop*) to
//             the appropriate type. It also splits sufficiently orthogonal accesses into
//             different functions, such as whether the access involves oops or primitives
//             and whether the access is performed on the heap or outside. Then the
//             appropriate BarrierSet::AccessBarrier is called to perform the access.

namespace AccessInternal {
  // Step 5.b: Post-runtime dispatch.
  // This class is the last step before calling the BarrierSet::AccessBarrier.
  // Here we make sure to figure out types that were not known prior to the
  // runtime dispatch, such as whether an oop on the heap is oop or narrowOop.
  // We also split orthogonal barriers such as handling primitives vs oops
  // and on-heap vs off-heap into different calls to the barrier set.
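  //
  // For illustration only: the code below is reached from the Access API wrappers
  // declared in access.hpp. A caller such as (hypothetical example)
  //
  //   oop value = HeapAccess<MO_RELAXED>::oop_load_at(base, offset);
  //
  // first goes through the runtime dispatch of step 5.a, after which one of the
  // PostRuntimeDispatch accessors below performs the actual access through the
  // selected BarrierSet::AccessBarrier.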
  template <class GCBarrierType, BarrierType type, DecoratorSet decorators>
  struct PostRuntimeDispatch: public AllStatic { };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE, decorators>: public AllStatic {
    template <typename T>
    static void access_barrier(void* addr, T value) {
      GCBarrierType::store_in_heap(reinterpret_cast<T*>(addr), value);
    }

    static void oop_access_barrier(void* addr, oop value) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        GCBarrierType::oop_store_in_heap(reinterpret_cast<OopType*>(addr), value);
      } else {
        GCBarrierType::oop_store_not_in_heap(reinterpret_cast<OopType*>(addr), value);
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(void* addr) {
      return GCBarrierType::load_in_heap(reinterpret_cast<T*>(addr));
    }

    static oop oop_access_barrier(void* addr) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_load_in_heap(reinterpret_cast<OopType*>(addr));
      } else {
        return GCBarrierType::oop_load_not_in_heap(reinterpret_cast<OopType*>(addr));
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, void* addr) {
      return GCBarrierType::atomic_xchg_in_heap(new_value, reinterpret_cast<T*>(addr));
    }

    static oop oop_access_barrier(oop new_value, void* addr) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_atomic_xchg_in_heap(new_value, reinterpret_cast<OopType*>(addr));
      } else {
        return GCBarrierType::oop_atomic_xchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr));
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, void* addr, T compare_value) {
      return GCBarrierType::atomic_cmpxchg_in_heap(new_value, reinterpret_cast<T*>(addr), compare_value);
    }

    static oop oop_access_barrier(oop new_value, void* addr, oop compare_value) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_atomic_cmpxchg_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
      } else {
        return GCBarrierType::oop_atomic_cmpxchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ARRAYCOPY, decorators>: public AllStatic {
    template <typename T>
    static bool access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      GCBarrierType::arraycopy_in_heap(src_obj, dst_obj, src, dst, length);
      return true;
    }

    template <typename T>
    static bool oop_access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      typedef typename HeapOopType<decorators>::type OopType;
      return GCBarrierType::oop_arraycopy_in_heap(src_obj, dst_obj,
                                                  reinterpret_cast<OopType*>(src),
                                                  reinterpret_cast<OopType*>(dst), length);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE_AT, decorators>: public AllStatic {
    template <typename T>
    static void access_barrier(oop base, ptrdiff_t offset, T value) {
      GCBarrierType::store_in_heap_at(base, offset, value);
    }

    static void oop_access_barrier(oop base, ptrdiff_t offset, oop value) {
      GCBarrierType::oop_store_in_heap_at(base, offset, value);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(oop base, ptrdiff_t offset) {
      return GCBarrierType::template load_in_heap_at<T>(base, offset);
    }

    static oop oop_access_barrier(oop base, ptrdiff_t offset) {
      return GCBarrierType::oop_load_in_heap_at(base, offset);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, oop base, ptrdiff_t offset) {
      return GCBarrierType::atomic_xchg_in_heap_at(new_value, base, offset);
    }

    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset) {
      return GCBarrierType::oop_atomic_xchg_in_heap_at(new_value, base, offset);
    }
  };
  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return GCBarrierType::atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
    }

    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
      return GCBarrierType::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_CLONE, decorators>: public AllStatic {
    static void access_barrier(oop src, oop dst, size_t size) {
      GCBarrierType::clone_in_heap(src, dst, size);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_RESOLVE, decorators>: public AllStatic {
    static oop access_barrier(oop obj) {
      return GCBarrierType::resolve(obj);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_EQUALS, decorators>: public AllStatic {
    static bool access_barrier(oop o1, oop o2) {
      return GCBarrierType::equals(o1, o2);
    }
  };

  // Resolving accessors with barriers from the barrier set happens in two steps.
  // 1. Expand paths with runtime-decorators, e.g. is UseCompressedOops on or off.
  // 2. Expand paths for each BarrierSet available in the system.
  template <DecoratorSet decorators, typename FunctionPointerT, BarrierType barrier_type>
  struct BarrierResolver: public AllStatic {
    template <DecoratorSet ds>
    static typename EnableIf<
      HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
      FunctionPointerT>::type
    resolve_barrier_gc() {
      BarrierSet* bs = BarrierSet::barrier_set();
      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
      switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
        case BarrierSet::bs_name: {                                     \
          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
            AccessBarrier<ds>, barrier_type, ds>::oop_access_barrier;   \
        }                                                               \
          break;
        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE

      default:
        fatal("BarrierSet AccessBarrier resolving not implemented");
        return NULL;
      };
    }

    template <DecoratorSet ds>
    static typename EnableIf<
      !HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
      FunctionPointerT>::type
    resolve_barrier_gc() {
      BarrierSet* bs = BarrierSet::barrier_set();
      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
      switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
        case BarrierSet::bs_name: {                                     \
          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
            AccessBarrier<ds>, barrier_type, ds>::access_barrier;       \
        }                                                               \
          break;
        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE

      default:
        fatal("BarrierSet AccessBarrier resolving not implemented");
        return NULL;
      };
    }

    static FunctionPointerT resolve_barrier_rt() {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | INTERNAL_RT_USE_COMPRESSED_OOPS;
        return resolve_barrier_gc<expanded_decorators>();
      } else {
        return resolve_barrier_gc<decorators>();
      }
    }

    static FunctionPointerT resolve_barrier() {
      return resolve_barrier_rt();
    }
  };

  // Step 5.a: Barrier resolution
  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
  // accessor. This is required when the access either depends on whether compressed oops
  // is being used, or it depends on which GC implementation was chosen (e.g. requires GC
  // barriers). The way it works is that a function pointer initially pointing to an
  // accessor resolution function gets called for each access. Upon first invocation,
  // it resolves which accessor is to be used in future invocations and patches the
  // function pointer to this new accessor.
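  //
  // Simplified sketch of the patching pattern (illustrative only, not literal code
  // from this file): _load_at_func starts out pointing at load_at_init; the first
  // call resolves the concrete accessor and overwrites the pointer, so later calls
  // bypass resolution entirely:
  //
  //   T load_at_init(oop base, ptrdiff_t offset) {             // called once
  //     func_t resolved = BarrierResolver<...>::resolve_barrier();
  //     _load_at_func = resolved;                               // patch the dispatch slot
  //     return resolved(base, offset);                          // complete this access
  //   }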
  template <DecoratorSet decorators, typename T>
  void RuntimeDispatch<decorators, T, BARRIER_STORE>::store_init(void* addr, T value) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
    _store_func = function;
    function(addr, value);
  }

  template <DecoratorSet decorators, typename T>
  void RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at_init(oop base, ptrdiff_t offset, T value) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
    _store_at_func = function;
    function(base, offset, value);
  }

  template <DecoratorSet decorators, typename T>
  T RuntimeDispatch<decorators, T, BARRIER_LOAD>::load_init(void* addr) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
    _load_func = function;
    return function(addr);
  }

  template <DecoratorSet decorators, typename T>
  T RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at_init(oop base, ptrdiff_t offset) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
    _load_at_func = function;
    return function(base, offset);
  }

  template <DecoratorSet decorators, typename T>
  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
    _atomic_cmpxchg_func = function;
    return function(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators, typename T>
  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
    _atomic_cmpxchg_at_func = function;
    return function(new_value, base, offset, compare_value);
  }

  template <DecoratorSet decorators, typename T>
  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg_init(T new_value, void* addr) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
    _atomic_xchg_func = function;
    return function(new_value, addr);
  }

  template <DecoratorSet decorators, typename T>
  T RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
    _atomic_xchg_at_func = function;
    return function(new_value, base, offset);
  }

  template <DecoratorSet decorators, typename T>
  bool RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
    _arraycopy_func = function;
    return function(src_obj, dst_obj, src, dst, length);
  }

  template <DecoratorSet decorators, typename T>
  void RuntimeDispatch<decorators, T, BARRIER_CLONE>::clone_init(oop src, oop dst, size_t size) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
    _clone_func = function;
    function(src, dst, size);
  }

  template <DecoratorSet decorators, typename T>
  oop RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::resolve_init(oop obj) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
    _resolve_func = function;
    return function(obj);
  }

  template <DecoratorSet decorators, typename T>
  bool RuntimeDispatch<decorators, T, BARRIER_EQUALS>::equals_init(oop o1, oop o2) {
    func_t function = BarrierResolver<decorators, func_t, BARRIER_EQUALS>::resolve_barrier();
    _equals_func = function;
    return function(o1, o2);
  }
}

#endif // SHARE_OOPS_ACCESS_INLINE_HPP