
src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp

rev 49986 : 8202080: Introduce ordering semantics for Atomic::add and other RMW atomics
Reviewed-by: lucy, rehn, dholmes
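
For orientation while reading the hunks below: the atomic_memory_order type that replaces cmpxchg_memory_order is declared in share/runtime/atomic.hpp. The following sketch shows the assumed shape of that enum; it is not copied verbatim from the shared header, and the enumerator values are deliberately omitted.

    // Assumed shape of the shared ordering enum introduced by 8202080 (sketch,
    // not a verbatim copy of share/runtime/atomic.hpp; values omitted).
    enum atomic_memory_order {
      // Intended to follow the C++11 orderings of the same names.
      memory_order_relaxed,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      // Strong two-way barrier; the historical HotSpot default for RMW atomics.
      memory_order_conservative
    };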

*** 1,8 ****
  /*
!  * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
!  * Copyright (c) 2016 SAP SE. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,8 ----
  /*
!  * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
!  * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 66,92 ****
  // instruction is retried as often as required.
  //
  // The return value of the method is the value that was successfully stored. At the
  // time the caller receives back control, the value in memory may have changed already.
  
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
    template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest) const;
  };
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
  
    D old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGFR    0,%[inc]                \n\t" // save increment
        "   LA      3,%[mem]                \n\t" // force data address into ARG2
  //    "   LAA     %[upd],%[inc],%[mem]    \n\t" // increment and get old value
  //    "   LAA     2,0,0(3)                \n\t" // actually coded instruction
--- 66,100 ----
  // instruction is retried as often as required.
  //
  // The return value of the method is the value that was successfully stored. At the
  // time the caller receives back control, the value in memory may have changed already.
  
+ // New atomic operations only include specific-operand-serialization, not full
+ // memory barriers. We can use the Fast-BCR-Serialization Facility for them.
+ inline void z196_fast_sync() {
+   __asm__ __volatile__ ("bcr 14, 0" : : : "memory");
+ }
+ 
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
    template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
  };
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest,
!                                                atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
  
    D old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
+     if (order == memory_order_conservative) { z196_fast_sync(); }
      __asm__ __volatile__ (
        "   LGFR    0,%[inc]                \n\t" // save increment
        "   LA      3,%[mem]                \n\t" // force data address into ARG2
  //    "   LAA     %[upd],%[inc],%[mem]    \n\t" // increment and get old value
  //    "   LAA     2,0,0(3)                \n\t" // actually coded instruction
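
A hedged illustration of what the new parameter buys a caller. The public Atomic::add is assumed to keep memory_order_conservative as its default, so the leading bcr 14,0 above (and the trailing one in the next hunk) is only skipped when a call site explicitly asks for weaker ordering:

    // Hypothetical call sites; the default-argument form of Atomic::add is an
    // assumption based on the changeset description.
    volatile int _counter = 0;

    void bump_statistic() {
      // Pure statistics counter: atomicity is enough, no fences requested.
      Atomic::add(1, &_counter, memory_order_relaxed);
    }

    void bump_with_full_fence() {
      // Unchanged call site: conservative ordering, full barriers around the add.
      Atomic::add(1, &_counter);
    }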
*** 104,113 ****
--- 112,122 ----
        //---< inputs >---
        : [inc]  "a"   (inc)                      // read-only.
        //---< clobbered >---
        : "cc",  "r0", "r2", "r3", "memory"
      );
+     if (order == memory_order_conservative) { z196_fast_sync(); }
    } else {
      __asm__ __volatile__ (
        "   LLGF    %[old],%[mem]           \n\t" // get old value
        "0: LA      %[upd],0(%[inc],%[old]) \n\t" // calc result
        "   CS      %[old],%[upd],%[mem]    \n\t" // try to xchg res with mem
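
The trailing z196_fast_sync() added here pairs with the leading one in the previous hunk: LAA performs only specific-operand serialization, so conservative semantics need a full barrier on both sides of the instruction. A rough portable rendering of that bracketing, using GCC builtins purely as an illustration:

    // Illustration only: the conservative LAA path expressed with GCC builtins
    // instead of inline assembly.
    inline int add_and_fetch_conservative_sketch(int inc, volatile int* dest) {
      __atomic_thread_fence(__ATOMIC_SEQ_CST);                    // leading bcr 14,0
      int upd = __atomic_add_fetch(dest, inc, __ATOMIC_RELAXED);  // the LAA itself
      __atomic_thread_fence(__ATOMIC_SEQ_CST);                    // trailing bcr 14,0
      return upd;
    }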
*** 127,143 ****
  }
  
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
  
    D old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
      __asm__ __volatile__ (
        "   LGR     0,%[inc]                \n\t" // save increment
        "   LA      3,%[mem]                \n\t" // force data address into ARG2
  //    "   LAAG    %[upd],%[inc],%[mem]    \n\t" // increment and get old value
  //    "   LAAG    2,0,0(3)                \n\t" // actually coded instruction
--- 136,154 ----
  }
  
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest,
!                                                atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
  
    D old, upd;
  
    if (VM_Version::has_LoadAndALUAtomicV1()) {
+     if (order == memory_order_conservative) { z196_fast_sync(); }
      __asm__ __volatile__ (
        "   LGR     0,%[inc]                \n\t" // save increment
        "   LA      3,%[mem]                \n\t" // force data address into ARG2
  //    "   LAAG    %[upd],%[inc],%[mem]    \n\t" // increment and get old value
  //    "   LAAG    2,0,0(3)                \n\t" // actually coded instruction
*** 155,164 ****
--- 166,176 ----
        //---< inputs >---
        : [inc]  "a"   (inc)                      // read-only.
        //---< clobbered >---
        : "cc",  "r0", "r2", "r3", "memory"
      );
+     if (order == memory_order_conservative) { z196_fast_sync(); }
    } else {
      __asm__ __volatile__ (
        "   LG      %[old],%[mem]           \n\t" // get old value
        "0: LA      %[upd],0(%[inc],%[old]) \n\t" // calc result
        "   CSG     %[old],%[upd],%[mem]    \n\t" // try to xchg res with mem
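
On hardware without the load-and-add facility, both widths take the else branch above: a LLGF/LG load followed by a LA plus CS (or CSG) retry loop. A minimal C++ rendering of that retry pattern, using a GCC builtin as a stand-in for COMPARE AND SWAP:

    // Sketch of the CS/CSG fallback loop in plain C++; illustrative, not the
    // code HotSpot compiles.
    template <typename D, typename I>
    D add_and_fetch_cas_loop(I inc, D volatile* dest) {
      D old = *dest;                        // LLGF / LG: fetch current value
      for (;;) {
        D upd = old + inc;                  // LA: compute the prospective result
        // CS / CSG: publish 'upd' iff memory still holds 'old'. On failure the
        // builtin refreshes 'old' with the value found in memory, just as CS
        // reloads its first operand, and the loop retries (JNE 0b).
        if (__atomic_compare_exchange_n(dest, &old, upd, /*weak=*/false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
          return upd;
        }
      }
    }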
*** 195,205 ****
  // The return value is the (unchanged) value from memory as it was when the
  // replacement succeeded.
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(T));
    T old;
  
    __asm__ __volatile__ (
      "   LLGF    %[old],%[mem]           \n\t" // get old value
--- 207,218 ----
  // The return value is the (unchanged) value from memory as it was when the
  // replacement succeeded.
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order unused) const {
    STATIC_ASSERT(4 == sizeof(T));
    T old;
  
    __asm__ __volatile__ (
      "   LLGF    %[old],%[mem]           \n\t" // get old value
*** 218,228 ****
  }
  
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    T old;
  
    __asm__ __volatile__ (
      "   LG      %[old],%[mem]           \n\t" // get old value
--- 231,242 ----
  }
  
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order unused) const {
    STATIC_ASSERT(8 == sizeof(T));
    T old;
  
    __asm__ __volatile__ (
      "   LG      %[old],%[mem]           \n\t" // get old value
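
Both PlatformXchg specializations accept the new argument only to match the shared signature; it is named unused because the exchange is built on a CS/CSG retry loop, and (as assumed here from z/Architecture's serialization rules for COMPARE AND SWAP) that already behaves like a full fence for every requested ordering. A hypothetical call site, assuming the exchange_value-first argument order used by the operator() above:

    // Hypothetical usage; the public Atomic::xchg signature shown here is an
    // assumption mirroring the platform operator() parameters.
    volatile int _state = 0;

    int set_state(int new_state) {
      // Returns the previous state; every requested ordering behaves
      // conservatively on s390 because CS/CSG serializes anyway (assumption).
      return Atomic::xchg(new_state, &_state);
    }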
*** 276,286 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
                                                  T volatile* dest,
                                                  T cmp_val,
!                                                 cmpxchg_memory_order unused) const {
    STATIC_ASSERT(4 == sizeof(T));
    T old;
  
    __asm__ __volatile__ (
      "   CS      %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
--- 290,300 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T xchg_val,
                                                  T volatile* dest,
                                                  T cmp_val,
!                                                 atomic_memory_order unused) const {
    STATIC_ASSERT(4 == sizeof(T));
    T old;
  
    __asm__ __volatile__ (
      "   CS      %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
*** 300,310 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
                                                  T volatile* dest,
                                                  T cmp_val,
!                                                 cmpxchg_memory_order unused) const {
    STATIC_ASSERT(8 == sizeof(T));
    T old;
  
    __asm__ __volatile__ (
      "   CSG     %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
--- 314,324 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T xchg_val,
                                                  T volatile* dest,
                                                  T cmp_val,
!                                                 atomic_memory_order unused) const {
    STATIC_ASSERT(8 == sizeof(T));
    T old;
  
    __asm__ __volatile__ (
      "   CSG     %[old],%[upd],%[mem]    \n\t" // Try to xchg upd with mem.
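
The only change in the two PlatformCmpxchg specializations is the rename from cmpxchg_memory_order to the unified atomic_memory_order; the parameter stays a no-op here for the same CS/CSG reason as above. A hedged example of a call site that could now request relaxed ordering explicitly (argument order assumed to be exchange_value, dest, compare_value, matching the operator() above):

    // Hypothetical call sites; the public Atomic::cmpxchg signature is assumed
    // from the operator() parameters in this file.
    volatile int _claimed = 0;

    bool try_claim() {
      // Succeeds iff _claimed was still 0; cmpxchg returns the old value.
      return Atomic::cmpxchg(1, &_claimed, 0) == 0;
    }

    bool try_claim_relaxed() {
      // Same exchange, explicitly requesting no ordering beyond atomicity.
      return Atomic::cmpxchg(1, &_claimed, 0, memory_order_relaxed) == 0;
    }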