--- old/src/hotspot/cpu/arm/relocInfo_arm.cpp 2018-06-04 20:52:57.617962640 -0400
+++ new/src/hotspot/cpu/arm/relocInfo_arm.cpp 2018-06-04 20:52:57.247928338 -0400
@@ -29,7 +29,7 @@
 #include "nativeInst_arm.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
--- old/src/hotspot/cpu/ppc/nativeInst_ppc.cpp 2018-06-04 20:52:58.074004915 -0400
+++ new/src/hotspot/cpu/ppc/nativeInst_ppc.cpp 2018-06-04 20:52:57.704970706 -0400
@@ -30,7 +30,7 @@
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.hpp"
 #include "runtime/handles.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/ostream.hpp"
--- old/src/hotspot/cpu/zero/cppInterpreter_zero.cpp 2018-06-04 20:52:58.536047747 -0400
+++ new/src/hotspot/cpu/zero/cppInterpreter_zero.cpp 2018-06-04 20:52:58.166013445 -0400
@@ -43,7 +43,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
--- old/src/hotspot/os/aix/os_aix.cpp 2018-06-04 20:52:59.017092340 -0400
+++ new/src/hotspot/os/aix/os_aix.cpp 2018-06-04 20:52:58.647058038 -0400
@@ -59,7 +59,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
--- old/src/hotspot/os/bsd/os_bsd.cpp 2018-06-04 20:52:59.564143051 -0400
+++ new/src/hotspot/os/bsd/os_bsd.cpp 2018-06-04 20:52:59.190108378 -0400
@@ -49,7 +49,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/semaphore.hpp"
--- old/src/hotspot/os/linux/os_linux.cpp 2018-06-04 20:53:00.096192372 -0400
+++ new/src/hotspot/os/linux/os_linux.cpp 2018-06-04 20:52:59.724157885 -0400
@@ -51,7 +51,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- old/src/hotspot/os/solaris/os_solaris.cpp 2018-06-04 20:53:00.673245865 -0400
+++ new/src/hotspot/os/solaris/os_solaris.cpp 2018-06-04 20:53:00.302211470 -0400
@@ -49,7 +49,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- old/src/hotspot/os/windows/os_windows.cpp 2018-06-04 20:53:01.241298524 -0400
+++ new/src/hotspot/os/windows/os_windows.cpp 2018-06-04 20:53:00.867263851 -0400
@@ -52,7 +52,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/osThread.hpp" #include "runtime/perfMemory.hpp" #include "runtime/sharedRuntime.hpp" --- old/src/hotspot/share/classfile/classLoader.inline.hpp 2018-06-04 20:53:01.812351460 -0400 +++ new/src/hotspot/share/classfile/classLoader.inline.hpp 2018-06-04 20:53:01.438316787 -0400 @@ -26,7 +26,7 @@ #define SHARE_VM_CLASSFILE_CLASSLOADER_INLINE_HPP #include "classfile/classLoader.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" // Next entry in class path inline ClassPathEntry* ClassPathEntry::next() const { return OrderAccess::load_acquire(&_next); } --- old/src/hotspot/share/classfile/dictionary.cpp 2018-06-04 20:53:02.271394014 -0400 +++ new/src/hotspot/share/classfile/dictionary.cpp 2018-06-04 20:53:01.898359433 -0400 @@ -35,7 +35,7 @@ #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/safepointVerifiers.hpp" #include "utilities/hashtable.inline.hpp" --- old/src/hotspot/share/classfile/dictionary.inline.hpp 2018-06-04 20:53:02.738437309 -0400 +++ new/src/hotspot/share/classfile/dictionary.inline.hpp 2018-06-04 20:53:02.369403099 -0400 @@ -26,7 +26,7 @@ #define SHARE_VM_CLASSFILE_DICTIONARY_INLINE_HPP #include "classfile/dictionary.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" inline ProtectionDomainEntry* DictionaryEntry::pd_set_acquire() const { return OrderAccess::load_acquire(&_pd_set); --- old/src/hotspot/share/classfile/systemDictionary.cpp 2018-06-04 20:53:03.191479306 -0400 +++ new/src/hotspot/share/classfile/systemDictionary.cpp 2018-06-04 20:53:02.820444911 -0400 @@ -76,7 +76,7 @@ #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" #include "services/classLoadingService.hpp" --- old/src/hotspot/share/classfile/verifier.cpp 2018-06-04 20:53:03.714527792 -0400 +++ new/src/hotspot/share/classfile/verifier.cpp 2018-06-04 20:53:03.340493119 -0400 @@ -47,7 +47,7 @@ #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jniHandles.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "runtime/safepointVerifiers.hpp" #include "runtime/thread.hpp" --- old/src/hotspot/share/code/nmethod.cpp 2018-06-04 20:53:04.235576093 -0400 +++ new/src/hotspot/share/code/nmethod.cpp 2018-06-04 20:53:03.865541791 -0400 @@ -51,7 +51,7 @@ #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/jniHandles.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "runtime/safepointVerifiers.hpp" #include "runtime/sharedRuntime.hpp" --- old/src/hotspot/share/gc/cms/adaptiveFreeList.cpp 2018-06-04 20:53:04.747623560 -0400 +++ new/src/hotspot/share/gc/cms/adaptiveFreeList.cpp 2018-06-04 20:53:04.379589443 -0400 @@ -29,7 +29,7 @@ #include "memory/freeList.inline.hpp" #include "runtime/globals.hpp" #include "runtime/mutex.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/vmThread.hpp" template <> --- old/src/hotspot/share/gc/cms/cmsCardTable.cpp 2018-06-04 
20:53:05.200665557 -0400 +++ new/src/hotspot/share/gc/cms/cmsCardTable.cpp 2018-06-04 20:53:04.830631255 -0400 @@ -34,7 +34,7 @@ #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/vmThread.hpp" CMSCardTable::CMSCardTable(MemRegion whole_heap) : --- old/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp 2018-06-04 20:53:05.665708667 -0400 +++ new/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp 2018-06-04 20:53:05.294674272 -0400 @@ -45,7 +45,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/vmThread.hpp" #include "utilities/align.hpp" #include "utilities/copy.hpp" --- old/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp 2018-06-04 20:53:06.189757246 -0400 +++ new/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp 2018-06-04 20:53:05.815722573 -0400 @@ -74,7 +74,7 @@ #include "runtime/globals_extension.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/timer.hpp" #include "runtime/vmThread.hpp" #include "services/memoryService.hpp" --- old/src/hotspot/share/gc/g1/g1AllocRegion.cpp 2018-06-04 20:53:06.815815282 -0400 +++ new/src/hotspot/share/gc/g1/g1AllocRegion.cpp 2018-06-04 20:53:06.445780980 -0400 @@ -29,7 +29,7 @@ #include "logging/log.hpp" #include "logging/logStream.hpp" #include "memory/resourceArea.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "utilities/align.hpp" G1CollectedHeap* G1AllocRegion::_g1h = NULL; --- old/src/hotspot/share/gc/g1/g1CardTable.cpp 2018-06-04 20:53:07.281858484 -0400 +++ new/src/hotspot/share/gc/g1/g1CardTable.cpp 2018-06-04 20:53:06.904823533 -0400 @@ -28,7 +28,7 @@ #include "gc/shared/memset_with_concurrent_readers.hpp" #include "logging/log.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" bool G1CardTable::mark_card_deferred(size_t card_index) { jbyte val = _byte_map[card_index]; --- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2018-06-04 20:53:07.741901130 -0400 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2018-06-04 20:53:07.367866457 -0400 @@ -86,7 +86,7 @@ #include "runtime/flags/flagSetting.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/threadSMR.hpp" #include "runtime/vmThread.hpp" #include "utilities/align.hpp" --- old/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp 2018-06-04 20:53:08.296952583 -0400 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp 2018-06-04 20:53:07.927918374 -0400 @@ -32,7 +32,7 @@ #include "gc/g1/heapRegionManager.inline.hpp" #include "gc/g1/heapRegionSet.inline.hpp" #include "gc/shared/taskqueue.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) { switch (dest.value()) { --- old/src/hotspot/share/gc/g1/heapRegion.cpp 2018-06-04 20:53:08.753994951 -0400 +++ new/src/hotspot/share/gc/g1/heapRegion.cpp 2018-06-04 20:53:08.384960742 -0400 @@ -43,7 +43,7 @@ #include "oops/compressedOops.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" 
-#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "utilities/growableArray.hpp" int HeapRegion::LogOfHRGrainBytes = 0; --- old/src/hotspot/share/gc/parallel/gcTaskManager.cpp 2018-06-04 20:53:09.220038153 -0400 +++ new/src/hotspot/share/gc/parallel/gcTaskManager.cpp 2018-06-04 20:53:08.852004037 -0400 @@ -34,7 +34,7 @@ #include "memory/resourceArea.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" // --- old/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp 2018-06-04 20:53:09.689081634 -0400 +++ new/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp 2018-06-04 20:53:09.322047610 -0400 @@ -27,7 +27,7 @@ #include "gc/shared/cardTableBarrierSet.hpp" #include "gc/shared/cardTable.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" template inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) { --- old/src/hotspot/share/gc/shared/oopStorage.cpp 2018-06-04 20:53:10.143123723 -0400 +++ new/src/hotspot/share/gc/shared/oopStorage.cpp 2018-06-04 20:53:09.773089421 -0400 @@ -33,7 +33,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/safepoint.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/thread.hpp" --- old/src/hotspot/share/gc/shared/space.cpp 2018-06-04 20:53:10.615167482 -0400 +++ new/src/hotspot/share/gc/shared/space.cpp 2018-06-04 20:53:10.247133365 -0400 @@ -36,7 +36,7 @@ #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/java.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/prefetch.inline.hpp" #include "runtime/safepoint.hpp" #include "utilities/align.hpp" --- old/src/hotspot/share/gc/shared/taskqueue.inline.hpp 2018-06-04 20:53:11.080210592 -0400 +++ new/src/hotspot/share/gc/shared/taskqueue.inline.hpp 2018-06-04 20:53:10.711176382 -0400 @@ -29,7 +29,7 @@ #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "utilities/debug.hpp" #include "utilities/stack.inline.hpp" --- old/src/hotspot/share/interpreter/bytecodeInterpreter.cpp 2018-06-04 20:53:11.542253423 -0400 +++ new/src/hotspot/share/interpreter/bytecodeInterpreter.cpp 2018-06-04 20:53:11.169218843 -0400 @@ -49,7 +49,7 @@ #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/threadCritical.hpp" #include "utilities/exceptions.hpp" --- old/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp 2018-06-04 20:53:12.072302559 -0400 +++ new/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp 2018-06-04 20:53:11.702268256 -0400 @@ -32,7 +32,7 @@ #include "memory/iterator.hpp" #include "oops/oop.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vmThread.hpp" #include "utilities/ostream.hpp" --- old/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp 2018-06-04 20:53:12.524344463 -0400 +++ 
new/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp 2018-06-04 20:53:12.156310346 -0400 @@ -38,7 +38,7 @@ #include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.inline.hpp" #include "runtime/safepoint.hpp" --- old/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp 2018-06-04 20:53:12.983387017 -0400 +++ new/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.cpp 2018-06-04 20:53:12.615352899 -0400 @@ -33,7 +33,7 @@ #include "oops/method.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/vm_version.hpp" #include "runtime/jniHandles.inline.hpp" #include "runtime/thread.inline.hpp" --- old/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp 2018-06-04 20:53:13.440429384 -0400 +++ new/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp 2018-06-04 20:53:13.070395082 -0400 @@ -27,7 +27,7 @@ #include "jfr/utilities/jfrTypes.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "utilities/macros.hpp" #ifdef VM_LITTLE_ENDIAN --- old/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp 2018-06-04 20:53:13.896471660 -0400 +++ new/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.cpp 2018-06-04 20:53:13.528437543 -0400 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp" #include "runtime/safepoint.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" // Alternating epochs on each rotation allow for concurrent tagging. // The regular epoch shift happens only during a safepoint. 
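For illustration only (not part of this changeset): every hunk above is the same mechanical swap, and it is safe because the acquire/release entry points these files use (for example OrderAccess::load_acquire in classLoader.inline.hpp and dictionary.inline.hpp earlier in the patch) are now declared and defined through "runtime/orderAccess.hpp" alone. A minimal sketch of that publication idiom, written against the OrderAccess API visible in this patch; the PublishedList and Node names are invented for the example.

#include "runtime/orderAccess.hpp"

class Node;

class PublishedList {
  Node* volatile _head;   // written by a publisher thread, read concurrently by consumers
 public:
  PublishedList() : _head(NULL) {}

  // Writer side: release_store makes the node's fields visible before the pointer is published.
  void publish(Node* n) {
    OrderAccess::release_store(&_head, n);
  }

  // Reader side: load_acquire pairs with the release above, so a non-NULL result
  // guarantees the node's fields are visible to this thread.
  Node* head_acquire() const {
    return OrderAccess::load_acquire(&_head);
  }
};
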
--- old/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp 2018-06-04 20:53:14.350513749 -0400 +++ new/src/hotspot/share/jfr/recorder/service/jfrPostBox.cpp 2018-06-04 20:53:13.981479540 -0400 @@ -26,7 +26,7 @@ #include "jfr/recorder/service/jfrPostBox.hpp" #include "jfr/utilities/jfrTryLock.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/thread.inline.hpp" #define MSG_IS_SYNCHRONOUS ( (MSGBIT(MSG_ROTATE)) | \ --- old/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp 2018-06-04 20:53:14.807556117 -0400 +++ new/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp 2018-06-04 20:53:14.436521722 -0400 @@ -46,7 +46,7 @@ #include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "runtime/safepoint.hpp" #include "runtime/thread.inline.hpp" --- old/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp 2018-06-04 20:53:15.269598949 -0400 +++ new/src/hotspot/share/jfr/recorder/storage/jfrBuffer.cpp 2018-06-04 20:53:14.900564739 -0400 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "jfr/recorder/storage/jfrBuffer.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/thread.inline.hpp" static const u1* const MUTEX_CLAIM = NULL; --- old/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp 2018-06-04 20:53:15.726641316 -0400 +++ new/src/hotspot/share/jfr/recorder/storage/jfrStorage.cpp 2018-06-04 20:53:15.358607200 -0400 @@ -38,7 +38,7 @@ #include "jfr/writers/jfrNativeEventWriter.hpp" #include "logging/log.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.inline.hpp" #include "runtime/safepoint.hpp" #include "runtime/thread.hpp" --- old/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp 2018-06-04 20:53:16.193684611 -0400 +++ new/src/hotspot/share/jfr/recorder/storage/jfrStorageControl.cpp 2018-06-04 20:53:15.825650494 -0400 @@ -26,7 +26,7 @@ #include "jfr/recorder/storage/jfrStorageControl.hpp" #include "runtime/atomic.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" // returns the updated value static jlong atomic_add(size_t value, size_t volatile* const dest) { --- old/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp 2018-06-04 20:53:16.645726516 -0400 +++ new/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp 2018-06-04 20:53:16.276692306 -0400 @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "jfr/recorder/storage/jfrVirtualMemory.hpp" #include "memory/virtualspace.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "services/memTracker.hpp" #include "utilities/globalDefinitions.hpp" --- old/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp 2018-06-04 20:53:17.106769254 -0400 +++ new/src/hotspot/share/jfr/recorder/stringpool/jfrStringPool.cpp 2018-06-04 20:53:16.738735138 -0400 @@ -35,7 +35,7 @@ #include "logging/log.hpp" #include "runtime/atomic.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/safepoint.hpp" #include "runtime/thread.inline.hpp" --- 
old/src/hotspot/share/jfr/recorder/stringpool/jfrStringPoolBuffer.cpp 2018-06-04 20:53:17.564811715 -0400
+++ new/src/hotspot/share/jfr/recorder/stringpool/jfrStringPoolBuffer.cpp 2018-06-04 20:53:17.193777320 -0400
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "jfr/recorder/stringpool/jfrStringPoolBuffer.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/thread.inline.hpp"
 
 JfrStringPoolBuffer::JfrStringPoolBuffer() : JfrBuffer(), _string_count_pos(0), _string_count_top(0) {}
--- old/src/hotspot/share/jfr/utilities/jfrAllocation.cpp 2018-06-04 20:53:18.017853712 -0400
+++ new/src/hotspot/share/jfr/utilities/jfrAllocation.cpp 2018-06-04 20:53:17.648819503 -0400
@@ -28,7 +28,7 @@
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/vm_version.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
--- old/src/hotspot/share/jfr/utilities/jfrHashtable.hpp 2018-06-04 20:53:18.470895709 -0400
+++ new/src/hotspot/share/jfr/utilities/jfrHashtable.hpp 2018-06-04 20:53:18.101861500 -0400
@@ -26,7 +26,7 @@
 #define SHARE_VM_JFR_UTILITIES_JFRHASHTABLE_HPP
 
 #include "memory/allocation.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/macros.hpp"
--- old/src/hotspot/share/logging/logOutputList.cpp 2018-06-04 20:53:18.929938262 -0400
+++ new/src/hotspot/share/logging/logOutputList.cpp 2018-06-04 20:53:18.558903867 -0400
@@ -26,7 +26,7 @@
 #include "logging/logOutputList.hpp"
 #include "memory/allocation.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 jint LogOutputList::increase_readers() {
--- old/src/hotspot/share/memory/metaspace.cpp 2018-06-04 20:53:19.385980538 -0400
+++ new/src/hotspot/share/memory/metaspace.cpp 2018-06-04 20:53:19.014946143 -0400
@@ -40,7 +40,7 @@
 #include "memory/metaspaceTracer.hpp"
 #include "memory/universe.hpp"
 #include "runtime/init.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
--- old/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp 2018-06-04 20:53:19.876025965 -0400
+++ new/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp 2018-06-04 20:53:19.506991755 -0400
@@ -15,7 +15,7 @@
 #include "memory/metaspace/metaspaceCommon.hpp"
 #include "memory/metaspace/virtualSpaceList.hpp"
 #include "memory/metaspace/virtualSpaceNode.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
--- old/src/hotspot/share/oops/array.inline.hpp 2018-06-04 20:53:20.333068333 -0400
+++ new/src/hotspot/share/oops/array.inline.hpp 2018-06-04 20:53:19.966034309 -0400
@@ -26,7 +26,7 @@
 #define SHARE_VM_OOPS_ARRAY_INLINE_HPP
 
 #include "oops/array.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 
 template <typename T>
 inline T Array<T>::at_acquire(const int which) { return OrderAccess::load_acquire(adr_at(which)); }
--- old/src/hotspot/share/oops/arrayKlass.inline.hpp 2018-06-04 20:53:20.783110052 -0400
+++ new/src/hotspot/share/oops/arrayKlass.inline.hpp 2018-06-04 20:53:20.414075842 -0400
@@ -25,7 +25,7 @@
 #ifndef
SHARE_VM_OOPS_ARRAYKLASS_INLINE_HPP #define SHARE_VM_OOPS_ARRAYKLASS_INLINE_HPP -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "oops/arrayKlass.hpp" inline Klass* ArrayKlass::higher_dimension_acquire() const { --- old/src/hotspot/share/oops/constantPool.inline.hpp 2018-06-04 20:53:21.236152049 -0400 +++ new/src/hotspot/share/oops/constantPool.inline.hpp 2018-06-04 20:53:20.866117747 -0400 @@ -27,7 +27,7 @@ #include "oops/constantPool.hpp" #include "oops/cpCache.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" inline CPSlot ConstantPool::slot_at(int which) const { assert(is_within_bounds(which), "index out of bounds"); --- old/src/hotspot/share/oops/cpCache.cpp 2018-06-04 20:53:21.691194231 -0400 +++ new/src/hotspot/share/oops/cpCache.cpp 2018-06-04 20:53:21.321159929 -0400 @@ -42,7 +42,7 @@ #include "prims/methodHandles.hpp" #include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "utilities/macros.hpp" // Implementation of ConstantPoolCacheEntry --- old/src/hotspot/share/oops/cpCache.inline.hpp 2018-06-04 20:53:22.163237990 -0400 +++ new/src/hotspot/share/oops/cpCache.inline.hpp 2018-06-04 20:53:21.792203595 -0400 @@ -27,7 +27,7 @@ #include "oops/cpCache.hpp" #include "oops/oopHandle.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" inline int ConstantPoolCacheEntry::indices_ord() const { return OrderAccess::load_acquire(&_indices); } --- old/src/hotspot/share/oops/instanceKlass.cpp 2018-06-04 20:53:22.620280358 -0400 +++ new/src/hotspot/share/oops/instanceKlass.cpp 2018-06-04 20:53:22.250246055 -0400 @@ -71,7 +71,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/thread.inline.hpp" #include "services/classLoadingService.hpp" #include "services/threadService.hpp" --- old/src/hotspot/share/oops/instanceKlass.inline.hpp 2018-06-04 20:53:23.156330050 -0400 +++ new/src/hotspot/share/oops/instanceKlass.inline.hpp 2018-06-04 20:53:22.785295655 -0400 @@ -29,7 +29,7 @@ #include "oops/instanceKlass.hpp" #include "oops/klass.hpp" #include "oops/oop.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" --- old/src/hotspot/share/oops/klass.cpp 2018-06-04 20:53:23.617372788 -0400 +++ new/src/hotspot/share/oops/klass.cpp 2018-06-04 20:53:23.243338115 -0400 @@ -43,7 +43,7 @@ #include "oops/oopHandle.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "utilities/macros.hpp" #include "utilities/stack.inline.hpp" --- old/src/hotspot/share/oops/method.cpp 2018-06-04 20:53:24.085416175 -0400 +++ new/src/hotspot/share/oops/method.cpp 2018-06-04 20:53:23.714381781 -0400 @@ -54,7 +54,7 @@ #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/relocator.hpp" #include "runtime/safepointVerifiers.hpp" #include "runtime/sharedRuntime.hpp" --- old/src/hotspot/share/oops/method.inline.hpp 2018-06-04 20:53:24.588462809 -0400 +++ 
new/src/hotspot/share/oops/method.inline.hpp 2018-06-04 20:53:24.217428414 -0400 @@ -26,7 +26,7 @@ #define SHARE_VM_OOPS_METHOD_INLINE_HPP #include "oops/method.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" inline address Method::from_compiled_entry() const { return OrderAccess::load_acquire(&_from_compiled_entry); --- old/src/hotspot/share/oops/methodData.cpp 2018-06-04 20:53:25.040504713 -0400 +++ new/src/hotspot/share/oops/methodData.cpp 2018-06-04 20:53:24.668470225 -0400 @@ -37,7 +37,7 @@ #include "runtime/compilationPolicy.hpp" #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/safepointVerifiers.hpp" #include "utilities/align.hpp" #include "utilities/copy.hpp" --- old/src/hotspot/share/oops/methodData.inline.hpp 2018-06-04 20:53:25.525549677 -0400 +++ new/src/hotspot/share/oops/methodData.inline.hpp 2018-06-04 20:53:25.154515282 -0400 @@ -26,7 +26,7 @@ #define SHARE_VM_OOPS_METHODDATA_INLINE_HPP #include "oops/methodData.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" inline void DataLayout::release_set_cell_at(int index, intptr_t value) { OrderAccess::release_store(&_cells[index], value); --- old/src/hotspot/share/oops/oop.inline.hpp 2018-06-04 20:53:25.976591489 -0400 +++ new/src/hotspot/share/oops/oop.inline.hpp 2018-06-04 20:53:25.606557186 -0400 @@ -35,7 +35,7 @@ #include "oops/markOop.inline.hpp" #include "oops/oop.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" #include "utilities/macros.hpp" --- old/src/hotspot/share/precompiled/precompiled.hpp 2018-06-04 20:53:26.437634227 -0400 +++ new/src/hotspot/share/precompiled/precompiled.hpp 2018-06-04 20:53:26.067599925 -0400 @@ -187,7 +187,7 @@ # include "runtime/mutexLocker.hpp" # include "runtime/objectMonitor.hpp" # include "runtime/orderAccess.hpp" -# include "runtime/orderAccess.inline.hpp" +# include "runtime/orderAccess.hpp" # include "runtime/os.hpp" # include "runtime/osThread.hpp" # include "runtime/perfData.hpp" --- old/src/hotspot/share/prims/jni.cpp 2018-06-04 20:53:26.896676781 -0400 +++ new/src/hotspot/share/prims/jni.cpp 2018-06-04 20:53:26.525642386 -0400 @@ -73,7 +73,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" #include "runtime/jniHandles.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/reflection.hpp" #include "runtime/safepointVerifiers.hpp" #include "runtime/sharedRuntime.hpp" --- old/src/hotspot/share/prims/jvm.cpp 2018-06-04 20:53:27.439727121 -0400 +++ new/src/hotspot/share/prims/jvm.cpp 2018-06-04 20:53:27.067692634 -0400 @@ -64,7 +64,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" #include "runtime/jniHandles.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.inline.hpp" #include "runtime/perfData.hpp" #include "runtime/reflection.hpp" --- old/src/hotspot/share/prims/jvmtiRawMonitor.cpp 2018-06-04 20:53:27.966775979 -0400 +++ new/src/hotspot/share/prims/jvmtiRawMonitor.cpp 2018-06-04 20:53:27.595741584 -0400 @@ -27,7 +27,7 @@ #include "prims/jvmtiRawMonitor.hpp" #include "runtime/atomic.hpp" #include "runtime/interfaceSupport.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include 
"runtime/orderAccess.hpp" #include "runtime/thread.inline.hpp" GrowableArray *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(1,true); --- old/src/hotspot/share/prims/unsafe.cpp 2018-06-04 20:53:28.425818532 -0400 +++ new/src/hotspot/share/prims/unsafe.cpp 2018-06-04 20:53:28.054784137 -0400 @@ -40,7 +40,7 @@ #include "runtime/globals.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/jniHandles.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/reflection.hpp" #include "runtime/thread.hpp" #include "runtime/threadSMR.hpp" --- old/src/hotspot/share/runtime/interfaceSupport.cpp 2018-06-04 20:53:28.898862384 -0400 +++ new/src/hotspot/share/runtime/interfaceSupport.cpp 2018-06-04 20:53:28.528828081 -0400 @@ -31,7 +31,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/init.hpp" #include "runtime/interfaceSupport.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.inline.hpp" #include "runtime/thread.inline.hpp" #include "runtime/safepointVerifiers.hpp" --- old/src/hotspot/share/runtime/mutex.cpp 2018-06-04 20:53:29.355904752 -0400 +++ new/src/hotspot/share/runtime/mutex.cpp 2018-06-04 20:53:28.984870356 -0400 @@ -26,7 +26,7 @@ #include "runtime/atomic.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/mutex.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/osThread.hpp" #include "runtime/safepointMechanism.inline.hpp" #include "runtime/thread.inline.hpp" --- old/src/hotspot/share/runtime/objectMonitor.cpp 2018-06-04 20:53:29.839949623 -0400 +++ new/src/hotspot/share/runtime/objectMonitor.cpp 2018-06-04 20:53:29.467915135 -0400 @@ -36,7 +36,7 @@ #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" #include "runtime/objectMonitor.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/osThread.hpp" #include "runtime/safepointMechanism.inline.hpp" #include "runtime/sharedRuntime.hpp" --- old/src/hotspot/share/runtime/orderAccess.hpp 2018-06-04 20:53:30.345996533 -0400 +++ new/src/hotspot/share/runtime/orderAccess.hpp 2018-06-04 20:53:29.974962138 -0400 @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "runtime/atomic.hpp" +#include "utilities/macros.hpp" // Memory Access Ordering Model // @@ -311,4 +312,38 @@ } }; +#include OS_CPU_HEADER(orderAccess) + +template<> inline void ScopedFenceGeneral::postfix() { OrderAccess::acquire(); } +template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } +template<> inline void ScopedFenceGeneral::prefix() { OrderAccess::release(); } +template<> inline void ScopedFenceGeneral::postfix() { OrderAccess::fence(); } + + +template +inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) { + ScopedFence f((void*)p); + Atomic::store(v, p); +} + +template +inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) { + ScopedFence f((void*)p); + return Atomic::load(p); +} + +template +inline T OrderAccess::load_acquire(const volatile T* p) { + return LoadImpl >()(p); +} + +template +inline void OrderAccess::release_store(volatile D* p, T v) { + StoreImpl >()(v, p); +} + +template +inline void OrderAccess::release_store_fence(volatile D* p, T v) { + StoreImpl >()(v, p); +} #endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP --- old/src/hotspot/share/runtime/perfMemory.cpp 2018-06-04 
20:53:30.804038994 -0400 +++ new/src/hotspot/share/runtime/perfMemory.cpp 2018-06-04 20:53:30.434004692 -0400 @@ -30,7 +30,7 @@ #include "runtime/java.hpp" #include "runtime/mutex.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "runtime/perfData.hpp" #include "runtime/perfMemory.hpp" --- old/src/hotspot/share/runtime/safepoint.cpp 2018-06-04 20:53:31.268082011 -0400 +++ new/src/hotspot/share/runtime/safepoint.cpp 2018-06-04 20:53:30.888046781 -0400 @@ -50,7 +50,7 @@ #include "runtime/frame.inline.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/osThread.hpp" #include "runtime/safepoint.hpp" #include "runtime/safepointMechanism.inline.hpp" --- old/src/hotspot/share/runtime/sweeper.cpp 2018-06-04 20:53:31.759127531 -0400 +++ new/src/hotspot/share/runtime/sweeper.cpp 2018-06-04 20:53:31.380092394 -0400 @@ -38,7 +38,7 @@ #include "runtime/compilationPolicy.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/mutexLocker.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "runtime/sweeper.hpp" #include "runtime/thread.inline.hpp" --- old/src/hotspot/share/runtime/thread.cpp 2018-06-04 20:53:32.230171197 -0400 +++ new/src/hotspot/share/runtime/thread.cpp 2018-06-04 20:53:31.858136709 -0400 @@ -80,7 +80,7 @@ #include "runtime/memprofiler.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/objectMonitor.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/osThread.hpp" #include "runtime/prefetch.inline.hpp" #include "runtime/safepoint.hpp" --- old/src/hotspot/share/runtime/thread.inline.hpp 2018-06-04 20:53:32.783222465 -0400 +++ new/src/hotspot/share/runtime/thread.inline.hpp 2018-06-04 20:53:32.412188070 -0400 @@ -27,7 +27,7 @@ #include "runtime/atomic.hpp" #include "runtime/globals.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.inline.hpp" #include "runtime/thread.hpp" --- old/src/hotspot/share/services/memTracker.cpp 2018-06-04 20:53:33.239264740 -0400 +++ new/src/hotspot/share/services/memTracker.cpp 2018-06-04 20:53:32.867230252 -0400 @@ -25,7 +25,7 @@ #include "jvm.h" #include "runtime/mutex.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/vmThread.hpp" #include "runtime/vm_operations.hpp" #include "services/memBaseline.hpp" --- old/src/hotspot/share/services/memoryManager.cpp 2018-06-04 20:53:33.697307201 -0400 +++ new/src/hotspot/share/services/memoryManager.cpp 2018-06-04 20:53:33.326272806 -0400 @@ -29,7 +29,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "services/lowMemoryDetector.hpp" #include "services/management.hpp" #include "services/memoryManager.hpp" --- old/src/hotspot/share/services/memoryPool.cpp 2018-06-04 20:53:34.154349569 -0400 +++ new/src/hotspot/share/services/memoryPool.cpp 2018-06-04 20:53:33.784315266 -0400 @@ -29,7 +29,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/javaCalls.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include 
"services/lowMemoryDetector.hpp" #include "services/management.hpp" #include "services/memoryManager.hpp" --- old/src/hotspot/share/utilities/concurrentHashTable.inline.hpp 2018-06-04 20:53:34.609391752 -0400 +++ new/src/hotspot/share/utilities/concurrentHashTable.inline.hpp 2018-06-04 20:53:34.239357449 -0400 @@ -27,7 +27,7 @@ #include "memory/allocation.inline.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/prefetch.inline.hpp" #include "utilities/concurrentHashTable.hpp" #include "utilities/globalCounter.inline.hpp" --- old/src/hotspot/share/utilities/globalCounter.cpp 2018-06-04 20:53:35.084435788 -0400 +++ new/src/hotspot/share/utilities/globalCounter.cpp 2018-06-04 20:53:34.714401486 -0400 @@ -24,7 +24,7 @@ #include "precompiled.hpp" #include "utilities/globalCounter.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/thread.hpp" #include "runtime/threadSMR.inline.hpp" #include "runtime/vmThread.hpp" --- old/src/hotspot/share/utilities/globalCounter.inline.hpp 2018-06-04 20:53:35.535477600 -0400 +++ new/src/hotspot/share/utilities/globalCounter.inline.hpp 2018-06-04 20:53:35.165443297 -0400 @@ -25,7 +25,7 @@ #ifndef SHARE_UTILITIES_GLOBAL_COUNTER_INLINE_HPP #define SHARE_UTILITIES_GLOBAL_COUNTER_INLINE_HPP -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/thread.inline.hpp" #include "utilities/globalCounter.hpp" --- old/src/hotspot/share/utilities/hashtable.inline.hpp 2018-06-04 20:53:35.987519504 -0400 +++ new/src/hotspot/share/utilities/hashtable.inline.hpp 2018-06-04 20:53:35.617485202 -0400 @@ -26,7 +26,7 @@ #define SHARE_VM_UTILITIES_HASHTABLE_INLINE_HPP #include "memory/allocation.inline.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "utilities/hashtable.hpp" #include "utilities/dtrace.hpp" --- old/test/hotspot/gtest/utilities/test_globalCounter.cpp 2018-06-04 20:53:36.444561872 -0400 +++ new/test/hotspot/gtest/utilities/test_globalCounter.cpp 2018-06-04 20:53:36.070527199 -0400 @@ -23,7 +23,7 @@ #include "precompiled.hpp" #include "runtime/atomic.hpp" -#include "runtime/orderAccess.inline.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "utilities/globalCounter.hpp" #include "utilities/globalCounter.inline.hpp" --- old/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp 2018-06-04 20:53:37.133625748 -0400 +++ /dev/null 2018-04-28 00:24:55.164000301 -0400 @@ -1,93 +0,0 @@ -/* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2014 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP -#define OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP - -#include "runtime/orderAccess.hpp" - -// Compiler version last used for testing: xlc 12 -// Please update this information when this file changes - -// Implementation of class OrderAccess. - -// -// Machine barrier instructions: -// -// - sync Two-way memory barrier, aka fence. -// - lwsync orders Store|Store, -// Load|Store, -// Load|Load, -// but not Store|Load -// - eieio orders Store|Store -// - isync Invalidates speculatively executed instructions, -// but isync may complete before storage accesses -// associated with instructions preceding isync have -// been performed. -// -// Semantic barrier instructions: -// (as defined in orderAccess.hpp) -// -// - release orders Store|Store, (maps to lwsync) -// Load|Store -// - acquire orders Load|Store, (maps to lwsync) -// Load|Load -// - fence orders Store|Store, (maps to sync) -// Load|Store, -// Load|Load, -// Store|Load -// - -#define inlasm_sync() __asm__ __volatile__ ("sync" : : : "memory"); -#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory"); -#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory"); -#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory"); -// Use twi-isync for load_acquire (faster than lwsync). -// ATTENTION: seems like xlC 10.1 has problems with this inline assembler macro (VerifyMethodHandles found "bad vminfo in AMH.conv"): -// #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory"); -#define inlasm_acquire_reg(X) inlasm_lwsync(); - -inline void OrderAccess::loadload() { inlasm_lwsync(); } -inline void OrderAccess::storestore() { inlasm_lwsync(); } -inline void OrderAccess::loadstore() { inlasm_lwsync(); } -inline void OrderAccess::storeload() { inlasm_sync(); } - -inline void OrderAccess::acquire() { inlasm_lwsync(); } -inline void OrderAccess::release() { inlasm_lwsync(); } -inline void OrderAccess::fence() { inlasm_sync(); } - -template -struct OrderAccess::PlatformOrderedLoad -{ - template - T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; } -}; - -#undef inlasm_sync -#undef inlasm_lwsync -#undef inlasm_eieio -#undef inlasm_isync - -#endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp 2018-06-04 20:53:36.527569567 -0400 @@ -0,0 +1,93 @@ +/* + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_HPP +#define OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_HPP + +#include "runtime/orderAccess.hpp" + +// Compiler version last used for testing: xlc 12 +// Please update this information when this file changes + +// Implementation of class OrderAccess. + +// +// Machine barrier instructions: +// +// - sync Two-way memory barrier, aka fence. +// - lwsync orders Store|Store, +// Load|Store, +// Load|Load, +// but not Store|Load +// - eieio orders Store|Store +// - isync Invalidates speculatively executed instructions, +// but isync may complete before storage accesses +// associated with instructions preceding isync have +// been performed. +// +// Semantic barrier instructions: +// (as defined in orderAccess.hpp) +// +// - release orders Store|Store, (maps to lwsync) +// Load|Store +// - acquire orders Load|Store, (maps to lwsync) +// Load|Load +// - fence orders Store|Store, (maps to sync) +// Load|Store, +// Load|Load, +// Store|Load +// + +#define inlasm_sync() __asm__ __volatile__ ("sync" : : : "memory"); +#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory"); +#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory"); +#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory"); +// Use twi-isync for load_acquire (faster than lwsync). +// ATTENTION: seems like xlC 10.1 has problems with this inline assembler macro (VerifyMethodHandles found "bad vminfo in AMH.conv"): +// #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory"); +#define inlasm_acquire_reg(X) inlasm_lwsync(); + +inline void OrderAccess::loadload() { inlasm_lwsync(); } +inline void OrderAccess::storestore() { inlasm_lwsync(); } +inline void OrderAccess::loadstore() { inlasm_lwsync(); } +inline void OrderAccess::storeload() { inlasm_sync(); } + +inline void OrderAccess::acquire() { inlasm_lwsync(); } +inline void OrderAccess::release() { inlasm_lwsync(); } +inline void OrderAccess::fence() { inlasm_sync(); } + +template +struct OrderAccess::PlatformOrderedLoad +{ + template + T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; } +}; + +#undef inlasm_sync +#undef inlasm_lwsync +#undef inlasm_eieio +#undef inlasm_isync + +#endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_HPP --- old/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp 2018-06-04 20:53:37.830690367 -0400 +++ /dev/null 2018-04-28 00:24:55.164000301 -0400 @@ -1,116 +0,0 @@ -/* - * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP -#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP - -#include "runtime/atomic.hpp" -#include "runtime/orderAccess.hpp" -#include "runtime/os.hpp" - -// Compiler version last used for testing: clang 5.1 -// Please update this information when this file changes - -// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions -static inline void compiler_barrier() { - __asm__ volatile ("" : : : "memory"); -} - -// x86 is TSO and hence only needs a fence for storeload -// However, a compiler barrier is still needed to prevent reordering -// between volatile and non-volatile memory accesses. - -// Implementation of class OrderAccess. - -inline void OrderAccess::loadload() { compiler_barrier(); } -inline void OrderAccess::storestore() { compiler_barrier(); } -inline void OrderAccess::loadstore() { compiler_barrier(); } -inline void OrderAccess::storeload() { fence(); } - -inline void OrderAccess::acquire() { compiler_barrier(); } -inline void OrderAccess::release() { compiler_barrier(); } - -inline void OrderAccess::fence() { - if (os::is_MP()) { - // always use locked addl since mfence is sometimes expensive -#ifdef AMD64 - __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory"); -#else - __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory"); -#endif - } - compiler_barrier(); -} - -template<> -struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgb (%2),%0" - : "=q" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgw (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgl (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -#ifdef AMD64 -template<> -struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; -#endif // AMD64 - -#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp 2018-06-04 20:53:37.225634278 -0400 @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_HPP +#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_HPP + +#include "runtime/atomic.hpp" +#include "runtime/orderAccess.hpp" + +// Compiler version last used for testing: clang 5.1 +// Please update this information when this file changes + +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions +static inline void compiler_barrier() { + __asm__ volatile ("" : : : "memory"); +} + +// x86 is TSO and hence only needs a fence for storeload +// However, a compiler barrier is still needed to prevent reordering +// between volatile and non-volatile memory accesses. + +// Implementation of class OrderAccess. + +inline void OrderAccess::loadload() { compiler_barrier(); } +inline void OrderAccess::storestore() { compiler_barrier(); } +inline void OrderAccess::loadstore() { compiler_barrier(); } +inline void OrderAccess::storeload() { fence(); } + +inline void OrderAccess::acquire() { compiler_barrier(); } +inline void OrderAccess::release() { compiler_barrier(); } + +inline void OrderAccess::fence() { + // always use locked addl since mfence is sometimes expensive +#ifdef AMD64 + __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory"); +#else + __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory"); +#endif + compiler_barrier(); +} + +template<> +struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgb (%2),%0" + : "=q" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgw (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgl (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +#ifdef AMD64 +template<> +struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgq (%2), %0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; +#endif // AMD64 + +#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_HPP --- old/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp 2018-06-04 20:53:38.527754984 -0400 +++ /dev/null 2018-04-28 00:24:55.164000301 -0400 @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007, 2008, 2009 Red Hat, Inc. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP -#define OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP - -#include "runtime/orderAccess.hpp" - -#ifdef ARM - -/* - * ARM Kernel helper for memory barrier. - * Using __asm __volatile ("":::"memory") does not work reliable on ARM - * and gcc __sync_synchronize(); implementation does not use the kernel - * helper for all gcc versions so it is unreliable to use as well. - */ -typedef void (__kernel_dmb_t) (void); -#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0) - -#define FULL_MEM_BARRIER __kernel_dmb() -#define LIGHT_MEM_BARRIER __kernel_dmb() - -#else // ARM - -#define FULL_MEM_BARRIER __sync_synchronize() - -#ifdef PPC - -#ifdef __NO_LWSYNC__ -#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory") -#else -#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory") -#endif - -#else // PPC - -#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory") - -#endif // PPC - -#endif // ARM - -// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient -// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore. - -inline void OrderAccess::loadload() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::loadstore() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::storeload() { FULL_MEM_BARRIER; } - -inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::release() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::fence() { FULL_MEM_BARRIER; } - -#endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.hpp 2018-06-04 20:53:37.923698988 -0400 @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright 2007, 2008, 2009 Red Hat, Inc. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_HPP +#define OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_HPP + +#include "runtime/orderAccess.hpp" + +#ifdef ARM + +/* + * ARM Kernel helper for memory barrier. + * Using __asm __volatile ("":::"memory") does not work reliable on ARM + * and gcc __sync_synchronize(); implementation does not use the kernel + * helper for all gcc versions so it is unreliable to use as well. + */ +typedef void (__kernel_dmb_t) (void); +#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0) + +#define FULL_MEM_BARRIER __kernel_dmb() +#define LIGHT_MEM_BARRIER __kernel_dmb() + +#else // ARM + +#define FULL_MEM_BARRIER __sync_synchronize() + +#ifdef PPC + +#ifdef __NO_LWSYNC__ +#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory") +#else +#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory") +#endif + +#else // PPC + +#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory") + +#endif // PPC + +#endif // ARM + +// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient +// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore. + +inline void OrderAccess::loadload() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::loadstore() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::storeload() { FULL_MEM_BARRIER; } + +inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::release() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::fence() { FULL_MEM_BARRIER; } + +#endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_HPP --- old/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp 2018-06-04 20:53:39.224819603 -0400 +++ /dev/null 2018-04-28 00:24:55.164000301 -0400 @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2014, Red Hat Inc. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP -#define OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP - -#include "runtime/atomic.hpp" -#include "runtime/orderAccess.hpp" -#include "runtime/os.hpp" -#include "vm_version_aarch64.hpp" - -// Implementation of class OrderAccess. - -inline void OrderAccess::loadload() { acquire(); } -inline void OrderAccess::storestore() { release(); } -inline void OrderAccess::loadstore() { acquire(); } -inline void OrderAccess::storeload() { fence(); } - -inline void OrderAccess::acquire() { - READ_MEM_BARRIER; -} - -inline void OrderAccess::release() { - WRITE_MEM_BARRIER; -} - -inline void OrderAccess::fence() { - FULL_MEM_BARRIER; -} - -template -struct OrderAccess::PlatformOrderedLoad -{ - template - T operator()(const volatile T* p) const { T data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; } -}; - -template -struct OrderAccess::PlatformOrderedStore -{ - template - void operator()(T v, volatile T* p) const { __atomic_store(p, &v, __ATOMIC_RELEASE); } -}; - -template -struct OrderAccess::PlatformOrderedStore -{ - template - void operator()(T v, volatile T* p) const { release_store(p, v); fence(); } -}; - -#endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp 2018-06-04 20:53:38.618763421 -0400 @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, Red Hat Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_HPP +#define OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_HPP + +#include "runtime/atomic.hpp" +#include "runtime/orderAccess.hpp" +#include "vm_version_aarch64.hpp" + +// Implementation of class OrderAccess. 
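Note (illustrative aside, not part of the changeset): the new AArch64 header above implements its ordered accesses with the GCC/Clang __atomic builtins, as the PlatformOrderedLoad/PlatformOrderedStore specializations just below show. A minimal standalone sketch of the same acquire/release publish-consume pairing, using those builtins directly; the names payload and ready are hypothetical:

  #include <cstdint>

  static int32_t payload;
  static int32_t ready;

  void publish(int32_t v) {
    payload = v;                                     // plain store of the data
    int32_t one = 1;
    __atomic_store(&ready, &one, __ATOMIC_RELEASE);  // release store (typically stlr on AArch64)
  }

  bool consume(int32_t* out) {
    int32_t r;
    __atomic_load(&ready, &r, __ATOMIC_ACQUIRE);     // acquire load (typically ldar on AArch64)
    if (r == 0) return false;
    *out = payload;                                  // ordered after the acquire load
    return true;
  }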
+
+inline void OrderAccess::loadload() { acquire(); }
+inline void OrderAccess::storestore() { release(); }
+inline void OrderAccess::loadstore() { acquire(); }
+inline void OrderAccess::storeload() { fence(); }
+
+inline void OrderAccess::acquire() {
+ READ_MEM_BARRIER;
+}
+
+inline void OrderAccess::release() {
+ WRITE_MEM_BARRIER;
+}
+
+inline void OrderAccess::fence() {
+ FULL_MEM_BARRIER;
+}
+
+template<size_t byte_size, ScopedFenceType type>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+ template <typename T>
+ T operator()(const volatile T* p) const { T data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
+};
+
+template<size_t byte_size, ScopedFenceType type>
+struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
+{
+ template <typename T>
+ void operator()(T v, volatile T* p) const { __atomic_store(p, &v, __ATOMIC_RELEASE); }
+};
+
+template<size_t byte_size, ScopedFenceType type>
+struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+{
+ template <typename T>
+ void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
+};
+
+#endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_HPP
--- old/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp 2018-06-04 20:53:39.918883943 -0400
+++ /dev/null 2018-04-28 00:24:55.164000301 -0400
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
-#define OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-#include "vm_version_arm.hpp"
-
-// Implementation of class OrderAccess.
-// - we define the high level barriers below and use the general
-// implementation in orderAccess.inline.hpp, with customizations
-// on AARCH64 via the specialized_* template functions
-
-// Memory Ordering on ARM is weak.
-//
-// Implement all 4 memory ordering barriers by DMB, since it is a
-// lighter version of DSB.
-// dmb_sy implies full system shareability domain. RD/WR access type.
-// dmb_st implies full system shareability domain. WR only access type.
-// -// NOP on < ARMv6 (MP not supported) -// -// Non mcr instructions can be used if we build for armv7 or higher arch -// __asm__ __volatile__ ("dmb" : : : "memory"); -// __asm__ __volatile__ ("dsb" : : : "memory"); -// -// inline void _OrderAccess_dsb() { -// volatile intptr_t dummy = 0; -// if (os::is_MP()) { -// __asm__ volatile ( -// "mcr p15, 0, %0, c7, c10, 4" -// : : "r" (dummy) : "memory"); -// } -// } - -inline static void dmb_sy() { - if (!os::is_MP()) { - return; - } -#ifdef AARCH64 - __asm__ __volatile__ ("dmb sy" : : : "memory"); -#else - if (VM_Version::arm_arch() >= 7) { -#ifdef __thumb__ - __asm__ volatile ( - "dmb sy": : : "memory"); -#else - __asm__ volatile ( - ".word 0xF57FF050 | 0xf" : : : "memory"); -#endif - } else { - intptr_t zero = 0; - __asm__ volatile ( - "mcr p15, 0, %0, c7, c10, 5" - : : "r" (zero) : "memory"); - } -#endif -} - -inline static void dmb_st() { - if (!os::is_MP()) { - return; - } -#ifdef AARCH64 - __asm__ __volatile__ ("dmb st" : : : "memory"); -#else - if (VM_Version::arm_arch() >= 7) { -#ifdef __thumb__ - __asm__ volatile ( - "dmb st": : : "memory"); -#else - __asm__ volatile ( - ".word 0xF57FF050 | 0xe" : : : "memory"); -#endif - } else { - intptr_t zero = 0; - __asm__ volatile ( - "mcr p15, 0, %0, c7, c10, 5" - : : "r" (zero) : "memory"); - } -#endif -} - -// Load-Load/Store barrier -inline static void dmb_ld() { -#ifdef AARCH64 - if (!os::is_MP()) { - return; - } - __asm__ __volatile__ ("dmb ld" : : : "memory"); -#else - dmb_sy(); -#endif -} - - -inline void OrderAccess::loadload() { dmb_ld(); } -inline void OrderAccess::loadstore() { dmb_ld(); } -inline void OrderAccess::acquire() { dmb_ld(); } -inline void OrderAccess::storestore() { dmb_st(); } -inline void OrderAccess::storeload() { dmb_sy(); } -inline void OrderAccess::release() { dmb_sy(); } -inline void OrderAccess::fence() { dmb_sy(); } - -// specializations for Aarch64 -// TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach - -#ifdef AARCH64 - -template<> -struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE> -{ - template - T operator()(const volatile T* p) const { - volatile T result; - __asm__ volatile( - "ldarb %w[res], [%[ptr]]" - : [res] "=&r" (result) - : [ptr] "r" (p) - : "memory"); - return result; - } -}; - -template<> -struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE> -{ - template - T operator()(const volatile T* p) const { - volatile T result; - __asm__ volatile( - "ldarh %w[res], [%[ptr]]" - : [res] "=&r" (result) - : [ptr] "r" (p) - : "memory"); - return result; - } -}; - -template<> -struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE> -{ - template - T operator()(const volatile T* p) const { - volatile T result; - __asm__ volatile( - "ldar %w[res], [%[ptr]]" - : [res] "=&r" (result) - : [ptr] "r" (p) - : "memory"); - return result; - } -}; - -template<> -struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE> -{ - template - T operator()(const volatile T* p) const { - volatile T result; - __asm__ volatile( - "ldar %[res], [%[ptr]]" - : [res] "=&r" (result) - : [ptr] "r" (p) - : "memory"); - return result; - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile( - "stlrb %w[val], [%[ptr]]" - : - : [ptr] "r" (p), [val] "r" (v) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile( - "stlrh 
%w[val], [%[ptr]]" - : - : [ptr] "r" (p), [val] "r" (v) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile( - "stlr %w[val], [%[ptr]]" - : - : [ptr] "r" (p), [val] "r" (v) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile( - "stlr %[val], [%[ptr]]" - : - : [ptr] "r" (p), [val] "r" (v) - : "memory"); - } -}; - -#endif // AARCH64 - -#endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp 2018-06-04 20:53:39.315828039 -0400 @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP +#define OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP + +#include "runtime/orderAccess.hpp" +#include "runtime/os.hpp" +#include "vm_version_arm.hpp" + +// Implementation of class OrderAccess. +// - we define the high level barriers below and use the general +// implementation in orderAccess.hpp, with customizations +// on AARCH64 via the specialized_* template functions + +// Memory Ordering on ARM is weak. +// +// Implement all 4 memory ordering barriers by DMB, since it is a +// lighter version of DSB. +// dmb_sy implies full system shareability domain. RD/WR access type. +// dmb_st implies full system shareability domain. WR only access type. 
+// +// NOP on < ARMv6 (MP not supported) +// +// Non mcr instructions can be used if we build for armv7 or higher arch +// __asm__ __volatile__ ("dmb" : : : "memory"); +// __asm__ __volatile__ ("dsb" : : : "memory"); +// +// inline void _OrderAccess_dsb() { +// volatile intptr_t dummy = 0; +// if (os::is_MP()) { +// __asm__ volatile ( +// "mcr p15, 0, %0, c7, c10, 4" +// : : "r" (dummy) : "memory"); +// } +// } + +inline static void dmb_sy() { + if (!os::is_MP()) { + return; + } +#ifdef AARCH64 + __asm__ __volatile__ ("dmb sy" : : : "memory"); +#else + if (VM_Version::arm_arch() >= 7) { +#ifdef __thumb__ + __asm__ volatile ( + "dmb sy": : : "memory"); +#else + __asm__ volatile ( + ".word 0xF57FF050 | 0xf" : : : "memory"); +#endif + } else { + intptr_t zero = 0; + __asm__ volatile ( + "mcr p15, 0, %0, c7, c10, 5" + : : "r" (zero) : "memory"); + } +#endif +} + +inline static void dmb_st() { + if (!os::is_MP()) { + return; + } +#ifdef AARCH64 + __asm__ __volatile__ ("dmb st" : : : "memory"); +#else + if (VM_Version::arm_arch() >= 7) { +#ifdef __thumb__ + __asm__ volatile ( + "dmb st": : : "memory"); +#else + __asm__ volatile ( + ".word 0xF57FF050 | 0xe" : : : "memory"); +#endif + } else { + intptr_t zero = 0; + __asm__ volatile ( + "mcr p15, 0, %0, c7, c10, 5" + : : "r" (zero) : "memory"); + } +#endif +} + +// Load-Load/Store barrier +inline static void dmb_ld() { +#ifdef AARCH64 + if (!os::is_MP()) { + return; + } + __asm__ __volatile__ ("dmb ld" : : : "memory"); +#else + dmb_sy(); +#endif +} + + +inline void OrderAccess::loadload() { dmb_ld(); } +inline void OrderAccess::loadstore() { dmb_ld(); } +inline void OrderAccess::acquire() { dmb_ld(); } +inline void OrderAccess::storestore() { dmb_st(); } +inline void OrderAccess::storeload() { dmb_sy(); } +inline void OrderAccess::release() { dmb_sy(); } +inline void OrderAccess::fence() { dmb_sy(); } + +// specializations for Aarch64 +// TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach + +#ifdef AARCH64 + +template<> +struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE> +{ + template + T operator()(const volatile T* p) const { + volatile T result; + __asm__ volatile( + "ldarb %w[res], [%[ptr]]" + : [res] "=&r" (result) + : [ptr] "r" (p) + : "memory"); + return result; + } +}; + +template<> +struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE> +{ + template + T operator()(const volatile T* p) const { + volatile T result; + __asm__ volatile( + "ldarh %w[res], [%[ptr]]" + : [res] "=&r" (result) + : [ptr] "r" (p) + : "memory"); + return result; + } +}; + +template<> +struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE> +{ + template + T operator()(const volatile T* p) const { + volatile T result; + __asm__ volatile( + "ldar %w[res], [%[ptr]]" + : [res] "=&r" (result) + : [ptr] "r" (p) + : "memory"); + return result; + } +}; + +template<> +struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE> +{ + template + T operator()(const volatile T* p) const { + volatile T result; + __asm__ volatile( + "ldar %[res], [%[ptr]]" + : [res] "=&r" (result) + : [ptr] "r" (p) + : "memory"); + return result; + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile( + "stlrb %w[val], [%[ptr]]" + : + : [ptr] "r" (p), [val] "r" (v) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile( + "stlrh 
%w[val], [%[ptr]]" + : + : [ptr] "r" (p), [val] "r" (v) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile( + "stlr %w[val], [%[ptr]]" + : + : [ptr] "r" (p), [val] "r" (v) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile( + "stlr %[val], [%[ptr]]" + : + : [ptr] "r" (p), [val] "r" (v) + : "memory"); + } +}; + +#endif // AARCH64 + +#endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP --- old/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp 2018-06-04 20:53:40.623949302 -0400 +++ /dev/null 2018-04-28 00:24:55.164000301 -0400 @@ -1,97 +0,0 @@ -/* - * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2012, 2014 SAP SE. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP -#define OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP - -#include "runtime/orderAccess.hpp" - -#ifndef PPC64 -#error "OrderAccess currently only implemented for PPC64" -#endif - -// Compiler version last used for testing: gcc 4.1.2 -// Please update this information when this file changes - -// Implementation of class OrderAccess. - -// -// Machine barrier instructions: -// -// - sync Two-way memory barrier, aka fence. -// - lwsync orders Store|Store, -// Load|Store, -// Load|Load, -// but not Store|Load -// - eieio orders Store|Store -// - isync Invalidates speculatively executed instructions, -// but isync may complete before storage accesses -// associated with instructions preceding isync have -// been performed. -// -// Semantic barrier instructions: -// (as defined in orderAccess.hpp) -// -// - release orders Store|Store, (maps to lwsync) -// Load|Store -// - acquire orders Load|Store, (maps to lwsync) -// Load|Load -// - fence orders Store|Store, (maps to sync) -// Load|Store, -// Load|Load, -// Store|Load -// - -#define inlasm_sync() __asm__ __volatile__ ("sync" : : : "memory"); -#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory"); -#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory"); -#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory"); -// Use twi-isync for load_acquire (faster than lwsync). 
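Note (illustrative aside, not part of the changeset): the twi-isync remark above refers to the PPC64 load_acquire idiom defined by inlasm_acquire_reg below: the never-taken trap twi 0,reg,0 makes later instructions depend on the loaded value, and isync keeps them from executing until that trap condition, and hence the load, has resolved, which gives acquire semantics without a full lwsync. A minimal sketch of what the macro amounts to for one 64-bit load; the helper name is hypothetical:

  static inline long load_acquire_sketch(const volatile long* p) {
    long v = *p;  // the ordinary load
    __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (v) : "memory");
    return v;     // later accesses cannot be moved ahead of the load
  }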
-#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory"); - -inline void OrderAccess::loadload() { inlasm_lwsync(); } -inline void OrderAccess::storestore() { inlasm_lwsync(); } -inline void OrderAccess::loadstore() { inlasm_lwsync(); } -inline void OrderAccess::storeload() { inlasm_sync(); } - -inline void OrderAccess::acquire() { inlasm_lwsync(); } -inline void OrderAccess::release() { inlasm_lwsync(); } -inline void OrderAccess::fence() { inlasm_sync(); } - - -template -struct OrderAccess::PlatformOrderedLoad -{ - template - T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; } -}; - -#undef inlasm_sync -#undef inlasm_lwsync -#undef inlasm_eieio -#undef inlasm_isync -#undef inlasm_acquire_reg - -#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp 2018-06-04 20:53:40.014892843 -0400 @@ -0,0 +1,97 @@ +/* + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2014 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_HPP +#define OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_HPP + +#include "runtime/orderAccess.hpp" + +#ifndef PPC64 +#error "OrderAccess currently only implemented for PPC64" +#endif + +// Compiler version last used for testing: gcc 4.1.2 +// Please update this information when this file changes + +// Implementation of class OrderAccess. + +// +// Machine barrier instructions: +// +// - sync Two-way memory barrier, aka fence. +// - lwsync orders Store|Store, +// Load|Store, +// Load|Load, +// but not Store|Load +// - eieio orders Store|Store +// - isync Invalidates speculatively executed instructions, +// but isync may complete before storage accesses +// associated with instructions preceding isync have +// been performed. 
+//
+// Semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release orders Store|Store, (maps to lwsync)
+// Load|Store
+// - acquire orders Load|Store, (maps to lwsync)
+// Load|Load
+// - fence orders Store|Store, (maps to sync)
+// Load|Store,
+// Load|Load,
+// Store|Load
+//
+
+#define inlasm_sync() __asm__ __volatile__ ("sync" : : : "memory");
+#define inlasm_lwsync() __asm__ __volatile__ ("lwsync" : : : "memory");
+#define inlasm_eieio() __asm__ __volatile__ ("eieio" : : : "memory");
+#define inlasm_isync() __asm__ __volatile__ ("isync" : : : "memory");
+// Use twi-isync for load_acquire (faster than lwsync).
+#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
+
+inline void OrderAccess::loadload() { inlasm_lwsync(); }
+inline void OrderAccess::storestore() { inlasm_lwsync(); }
+inline void OrderAccess::loadstore() { inlasm_lwsync(); }
+inline void OrderAccess::storeload() { inlasm_sync(); }
+
+inline void OrderAccess::acquire() { inlasm_lwsync(); }
+inline void OrderAccess::release() { inlasm_lwsync(); }
+inline void OrderAccess::fence() { inlasm_sync(); }
+
+
+template<size_t byte_size, ScopedFenceType type>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+ template <typename T>
+ T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
+};
+
+#undef inlasm_sync
+#undef inlasm_lwsync
+#undef inlasm_eieio
+#undef inlasm_isync
+#undef inlasm_acquire_reg
+
+#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_HPP
--- old/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp 2018-06-04 20:53:41.331014848 -0400
+++ /dev/null 2018-04-28 00:24:55.164000301 -0400
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP
-#define OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-#include "vm_version_s390.hpp"
-
-// Implementation of class OrderAccess.
- -// -// machine barrier instructions: -// -// - z_sync two-way memory barrier, aka fence -// -// semantic barrier instructions: -// (as defined in orderAccess.hpp) -// -// - z_release orders Store|Store, (maps to compiler barrier) -// Load|Store -// - z_acquire orders Load|Store, (maps to compiler barrier) -// Load|Load -// - z_fence orders Store|Store, (maps to z_sync) -// Load|Store, -// Load|Load, -// Store|Load -// - - -// Only load-after-store-order is not guaranteed on z/Architecture, i.e. only 'fence' -// is needed. - -// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions. -#define inlasm_compiler_barrier() __asm__ volatile ("" : : : "memory"); -// "bcr 15, 0" is used as two way memory barrier. -#define inlasm_zarch_sync() __asm__ __volatile__ ("bcr 15, 0" : : : "memory"); - -// Release and acquire are empty on z/Architecture, but potential -// optimizations of gcc must be forbidden by OrderAccess::release and -// OrderAccess::acquire. -#define inlasm_zarch_release() inlasm_compiler_barrier() -#define inlasm_zarch_acquire() inlasm_compiler_barrier() -#define inlasm_zarch_fence() inlasm_zarch_sync() - -inline void OrderAccess::loadload() { inlasm_compiler_barrier(); } -inline void OrderAccess::storestore() { inlasm_compiler_barrier(); } -inline void OrderAccess::loadstore() { inlasm_compiler_barrier(); } -inline void OrderAccess::storeload() { inlasm_zarch_sync(); } - -inline void OrderAccess::acquire() { inlasm_zarch_acquire(); } -inline void OrderAccess::release() { inlasm_zarch_release(); } -inline void OrderAccess::fence() { inlasm_zarch_sync(); } - -template -struct OrderAccess::PlatformOrderedLoad -{ - template - T operator()(const volatile T* p) const { register T t = *p; inlasm_zarch_acquire(); return t; } -}; - -#undef inlasm_compiler_barrier -#undef inlasm_zarch_sync -#undef inlasm_zarch_release -#undef inlasm_zarch_acquire -#undef inlasm_zarch_fence - -#endif // OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp 2018-06-04 20:53:40.716957924 -0400 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_HPP +#define OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_HPP + +#include "runtime/orderAccess.hpp" +#include "vm_version_s390.hpp" + +// Implementation of class OrderAccess. 
+ +// +// machine barrier instructions: +// +// - z_sync two-way memory barrier, aka fence +// +// semantic barrier instructions: +// (as defined in orderAccess.hpp) +// +// - z_release orders Store|Store, (maps to compiler barrier) +// Load|Store +// - z_acquire orders Load|Store, (maps to compiler barrier) +// Load|Load +// - z_fence orders Store|Store, (maps to z_sync) +// Load|Store, +// Load|Load, +// Store|Load +// + + +// Only load-after-store-order is not guaranteed on z/Architecture, i.e. only 'fence' +// is needed. + +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions. +#define inlasm_compiler_barrier() __asm__ volatile ("" : : : "memory"); +// "bcr 15, 0" is used as two way memory barrier. +#define inlasm_zarch_sync() __asm__ __volatile__ ("bcr 15, 0" : : : "memory"); + +// Release and acquire are empty on z/Architecture, but potential +// optimizations of gcc must be forbidden by OrderAccess::release and +// OrderAccess::acquire. +#define inlasm_zarch_release() inlasm_compiler_barrier() +#define inlasm_zarch_acquire() inlasm_compiler_barrier() +#define inlasm_zarch_fence() inlasm_zarch_sync() + +inline void OrderAccess::loadload() { inlasm_compiler_barrier(); } +inline void OrderAccess::storestore() { inlasm_compiler_barrier(); } +inline void OrderAccess::loadstore() { inlasm_compiler_barrier(); } +inline void OrderAccess::storeload() { inlasm_zarch_sync(); } + +inline void OrderAccess::acquire() { inlasm_zarch_acquire(); } +inline void OrderAccess::release() { inlasm_zarch_release(); } +inline void OrderAccess::fence() { inlasm_zarch_sync(); } + +template +struct OrderAccess::PlatformOrderedLoad +{ + template + T operator()(const volatile T* p) const { register T t = *p; inlasm_zarch_acquire(); return t; } +}; + +#undef inlasm_compiler_barrier +#undef inlasm_zarch_sync +#undef inlasm_zarch_release +#undef inlasm_zarch_acquire +#undef inlasm_zarch_fence + +#endif // OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_HPP --- old/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp 2018-06-04 20:53:42.030079651 -0400 +++ /dev/null 2018-04-28 00:24:55.164000301 -0400 @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP -#define OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP - -#include "runtime/orderAccess.hpp" - -// Implementation of class OrderAccess. 
- -// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions -static inline void compiler_barrier() { - __asm__ volatile ("" : : : "memory"); -} - -// Assume TSO. - -inline void OrderAccess::loadload() { compiler_barrier(); } -inline void OrderAccess::storestore() { compiler_barrier(); } -inline void OrderAccess::loadstore() { compiler_barrier(); } -inline void OrderAccess::storeload() { fence(); } - -inline void OrderAccess::acquire() { compiler_barrier(); } -inline void OrderAccess::release() { compiler_barrier(); } - -inline void OrderAccess::fence() { - __asm__ volatile ("membar #StoreLoad" : : : "memory"); -} - -#endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.hpp 2018-06-04 20:53:41.424023469 -0400 @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_HPP +#define OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_HPP + +#include "runtime/orderAccess.hpp" + +// Implementation of class OrderAccess. + +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions +static inline void compiler_barrier() { + __asm__ volatile ("" : : : "memory"); +} + +// Assume TSO. + +inline void OrderAccess::loadload() { compiler_barrier(); } +inline void OrderAccess::storestore() { compiler_barrier(); } +inline void OrderAccess::loadstore() { compiler_barrier(); } +inline void OrderAccess::storeload() { fence(); } + +inline void OrderAccess::acquire() { compiler_barrier(); } +inline void OrderAccess::release() { compiler_barrier(); } + +inline void OrderAccess::fence() { + __asm__ volatile ("membar #StoreLoad" : : : "memory"); +} + +#endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_HPP --- old/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp 2018-06-04 20:53:42.726144177 -0400 +++ /dev/null 2018-04-28 00:24:55.164000301 -0400 @@ -1,112 +0,0 @@ -/* - * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP -#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP - -#include "runtime/atomic.hpp" -#include "runtime/orderAccess.hpp" -#include "runtime/os.hpp" - -// Compiler version last used for testing: gcc 4.8.2 -// Please update this information when this file changes - -// Implementation of class OrderAccess. - -// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions -static inline void compiler_barrier() { - __asm__ volatile ("" : : : "memory"); -} - -inline void OrderAccess::loadload() { compiler_barrier(); } -inline void OrderAccess::storestore() { compiler_barrier(); } -inline void OrderAccess::loadstore() { compiler_barrier(); } -inline void OrderAccess::storeload() { fence(); } - -inline void OrderAccess::acquire() { compiler_barrier(); } -inline void OrderAccess::release() { compiler_barrier(); } - -inline void OrderAccess::fence() { - if (os::is_MP()) { - // always use locked addl since mfence is sometimes expensive -#ifdef AMD64 - __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory"); -#else - __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory"); -#endif - } - compiler_barrier(); -} - -template<> -struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgb (%2),%0" - : "=q" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgw (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -template<> -struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgl (%2),%0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; - -#ifdef AMD64 -template<> -struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> -{ - template - void operator()(T v, volatile T* p) const { - __asm__ volatile ( "xchgq (%2), %0" - : "=r" (v) - : "0" (v), "r" (p) - : "memory"); - } -}; -#endif // AMD64 - -#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp 2018-06-04 20:53:42.120087995 -0400 @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP +#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP + +#include "runtime/atomic.hpp" +#include "runtime/orderAccess.hpp" + +// Compiler version last used for testing: gcc 4.8.2 +// Please update this information when this file changes + +// Implementation of class OrderAccess. + +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions +static inline void compiler_barrier() { + __asm__ volatile ("" : : : "memory"); +} + +inline void OrderAccess::loadload() { compiler_barrier(); } +inline void OrderAccess::storestore() { compiler_barrier(); } +inline void OrderAccess::loadstore() { compiler_barrier(); } +inline void OrderAccess::storeload() { fence(); } + +inline void OrderAccess::acquire() { compiler_barrier(); } +inline void OrderAccess::release() { compiler_barrier(); } + +inline void OrderAccess::fence() { + // always use locked addl since mfence is sometimes expensive +#ifdef AMD64 + __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory"); +#else + __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory"); +#endif + compiler_barrier(); +} + +template<> +struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgb (%2),%0" + : "=q" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgw (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +template<> +struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgl (%2),%0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; + +#ifdef AMD64 +template<> +struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE> +{ + template + void operator()(T v, volatile T* p) const { + __asm__ volatile ( "xchgq (%2), %0" + : "=r" (v) + : "0" (v), "r" (p) + : "memory"); + } +}; +#endif // AMD64 + +#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP --- old/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp 2018-06-04 20:53:43.426209073 -0400 +++ /dev/null 2018-04-28 00:24:55.164000301 -0400 @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. - * Copyright 2007, 2008, 2009 Red Hat, Inc. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. 
- * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP -#define OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP - -#include "runtime/orderAccess.hpp" - -#ifdef ARM - -/* - * ARM Kernel helper for memory barrier. - * Using __asm __volatile ("":::"memory") does not work reliable on ARM - * and gcc __sync_synchronize(); implementation does not use the kernel - * helper for all gcc versions so it is unreliable to use as well. - */ -typedef void (__kernel_dmb_t) (void); -#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0) - -#define FULL_MEM_BARRIER __kernel_dmb() -#define LIGHT_MEM_BARRIER __kernel_dmb() - -#else // ARM - -#define FULL_MEM_BARRIER __sync_synchronize() - -#ifdef PPC - -#ifdef __NO_LWSYNC__ -#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory") -#else -#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory") -#endif - -#else // PPC - -#ifdef ALPHA - -#define LIGHT_MEM_BARRIER __sync_synchronize() - -#else // ALPHA - -#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory") - -#endif // ALPHA - -#endif // PPC - -#endif // ARM - -// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient -// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore. - -inline void OrderAccess::loadload() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::loadstore() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::storeload() { FULL_MEM_BARRIER; } - -inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; } -inline void OrderAccess::release() { LIGHT_MEM_BARRIER; } - -inline void OrderAccess::fence() { FULL_MEM_BARRIER; } - -#endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.hpp 2018-06-04 20:53:42.818152706 -0400 @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright 2007, 2008, 2009 Red Hat, Inc. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_HPP +#define OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_HPP + +#include "runtime/orderAccess.hpp" + +#ifdef ARM + +/* + * ARM Kernel helper for memory barrier. + * Using __asm __volatile ("":::"memory") does not work reliable on ARM + * and gcc __sync_synchronize(); implementation does not use the kernel + * helper for all gcc versions so it is unreliable to use as well. + */ +typedef void (__kernel_dmb_t) (void); +#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0) + +#define FULL_MEM_BARRIER __kernel_dmb() +#define LIGHT_MEM_BARRIER __kernel_dmb() + +#else // ARM + +#define FULL_MEM_BARRIER __sync_synchronize() + +#ifdef PPC + +#ifdef __NO_LWSYNC__ +#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory") +#else +#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory") +#endif + +#else // PPC + +#ifdef ALPHA + +#define LIGHT_MEM_BARRIER __sync_synchronize() + +#else // ALPHA + +#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory") + +#endif // ALPHA + +#endif // PPC + +#endif // ARM + +// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient +// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore. + +inline void OrderAccess::loadload() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::loadstore() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::storeload() { FULL_MEM_BARRIER; } + +inline void OrderAccess::acquire() { LIGHT_MEM_BARRIER; } +inline void OrderAccess::release() { LIGHT_MEM_BARRIER; } + +inline void OrderAccess::fence() { FULL_MEM_BARRIER; } + +#endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_HPP --- old/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp 2018-06-04 20:53:44.123273691 -0400 +++ /dev/null 2018-04-28 00:24:55.164000301 -0400 @@ -1,55 +0,0 @@ -/* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP -#define OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP - -#include "runtime/atomic.hpp" -#include "runtime/orderAccess.hpp" - -// Compiler version last used for testing: solaris studio 12u3 -// Please update this information when this file changes - -// Implementation of class OrderAccess. - -// Assume TSO. - -// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions -inline void compiler_barrier() { - __asm__ volatile ("" : : : "memory"); -} - -inline void OrderAccess::loadload() { compiler_barrier(); } -inline void OrderAccess::storestore() { compiler_barrier(); } -inline void OrderAccess::loadstore() { compiler_barrier(); } -inline void OrderAccess::storeload() { fence(); } - -inline void OrderAccess::acquire() { compiler_barrier(); } -inline void OrderAccess::release() { compiler_barrier(); } - -inline void OrderAccess::fence() { - __asm__ volatile ("membar #StoreLoad" : : : "memory"); -} - -#endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP --- /dev/null 2018-04-28 00:24:55.164000301 -0400 +++ new/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.hpp 2018-06-04 20:53:43.518217602 -0400 @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_HPP +#define OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_HPP + +#include "runtime/atomic.hpp" +#include "runtime/orderAccess.hpp" + +// Compiler version last used for testing: solaris studio 12u3 +// Please update this information when this file changes + +// Implementation of class OrderAccess. + +// Assume TSO. 
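Note (illustrative aside, not part of the changeset): "Assume TSO" is why everything except storeload()/fence() can be a plain compiler barrier in this file: under total store order the hardware may only reorder an earlier store with a later load through the store buffer. A minimal sketch of the store-then-load pattern that still needs the real barrier; std::atomic_thread_fence stands in for OrderAccess::storeload(), and the flag names are hypothetical:

  #include <atomic>

  static std::atomic<int> flag0{0}, flag1{0};

  // Dekker-style entry for thread 0; thread 1 mirrors this with the flags swapped.
  bool thread0_enter() {
    flag0.store(1, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst); // StoreLoad barrier
    // Without the fence, both threads could observe 0 here, because each
    // store may still be sitting in its own store buffer.
    return flag1.load(std::memory_order_relaxed) == 0;
  }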
--- old/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp 2018-06-04 20:53:44.818338124 -0400
+++ /dev/null 2018-04-28 00:24:55.164000301 -0400
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
-#define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-
-// Compiler version last used for testing: solaris studio 12u3
-// Please update this information when this file changes
-
-// Implementation of class OrderAccess.
-
-// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
-inline void compiler_barrier() {
-  __asm__ volatile ("" : : : "memory");
-}
-
-inline void OrderAccess::loadload() { compiler_barrier(); }
-inline void OrderAccess::storestore() { compiler_barrier(); }
-inline void OrderAccess::loadstore() { compiler_barrier(); }
-inline void OrderAccess::storeload() { fence(); }
-
-inline void OrderAccess::acquire() { compiler_barrier(); }
-inline void OrderAccess::release() { compiler_barrier(); }
-
-inline void OrderAccess::fence() {
-  if (os::is_MP()) {
-#ifdef AMD64
-    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
-#else
-    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
-#endif
-  }
-  compiler_barrier();
-}
-
-#endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
--- /dev/null 2018-04-28 00:24:55.164000301 -0400
+++ new/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.hpp 2018-06-04 20:53:44.213282035 -0400
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_HPP
+#define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_HPP
+
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+
+// Compiler version last used for testing: solaris studio 12u3
+// Please update this information when this file changes
+
+// Implementation of class OrderAccess.
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
+inline void OrderAccess::loadload() { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore() { compiler_barrier(); }
+inline void OrderAccess::storeload() { fence(); }
+
+inline void OrderAccess::acquire() { compiler_barrier(); }
+inline void OrderAccess::release() { compiler_barrier(); }
+
+inline void OrderAccess::fence() {
+#ifdef AMD64
+  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
+#else
+  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
+#endif
+  compiler_barrier();
+}
+
+#endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_HPP
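Worth noting about the Solaris x86 file above (illustration only, not part of the change): the new header drops the os::is_MP() test, so the locked add is issued unconditionally, and the "lock; addl $0,0(%rsp)" idiom is used as a full StoreLoad barrier in place of mfence. If you want to poke at that idiom outside HotSpot, a minimal stand-alone version might look like the sketch below (GCC/Clang syntax, assumes an x86_64 target; the function name is invented for the example).

#include <atomic>

inline void locked_add_fence() {
#if defined(__x86_64__)
  // Same idiom as the Solaris file above: a locked read-modify-write on the
  // stack acts as a full StoreLoad barrier and as a compiler barrier.
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
  // Fallback so the sketch still builds on other targets.
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

int main() {
  locked_add_fence();
  return 0;
}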
--- old/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp 2018-06-04 20:53:45.516402835 -0400
+++ /dev/null 2018-04-28 00:24:55.164000301 -0400
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
-#define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
-
-#include <intrin.h>
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-
-// Compiler version last used for testing: Microsoft Visual Studio 2010
-// Please update this information when this file changes
-
-// Implementation of class OrderAccess.
-
-// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
-inline void compiler_barrier() {
-  _ReadWriteBarrier();
-}
-
-// Note that in MSVC, volatile memory accesses are explicitly
-// guaranteed to have acquire release semantics (w.r.t. compiler
-// reordering) and therefore does not even need a compiler barrier
-// for normal acquire release accesses. And all generalized
-// bound calls like release_store go through OrderAccess::load
-// and OrderAccess::store which do volatile memory accesses.
-template<> inline void ScopedFence<X_ACQUIRE>::postfix() { }
-template<> inline void ScopedFence<RELEASE_X>::prefix() { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix() { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
-
-inline void OrderAccess::loadload() { compiler_barrier(); }
-inline void OrderAccess::storestore() { compiler_barrier(); }
-inline void OrderAccess::loadstore() { compiler_barrier(); }
-inline void OrderAccess::storeload() { fence(); }
-
-inline void OrderAccess::acquire() { compiler_barrier(); }
-inline void OrderAccess::release() { compiler_barrier(); }
-
-inline void OrderAccess::fence() {
-#ifdef AMD64
-  StubRoutines_fence();
-#else
-  if (os::is_MP()) {
-    __asm {
-      lock add dword ptr [esp], 0;
-    }
-  }
-#endif // AMD64
-  compiler_barrier();
-}
-
-#ifndef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov al, v;
-      xchg al, byte ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov ax, v;
-      xchg ax, word ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov eax, v;
-      xchg eax, dword ptr [edx];
-    }
-  }
-};
-#endif // AMD64
-
-#endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
--- /dev/null 2018-04-28 00:24:55.164000301 -0400
+++ new/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp 2018-06-04 20:53:44.909346560 -0400
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
+#define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
+
+#include <intrin.h>
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+
+// Compiler version last used for testing: Microsoft Visual Studio 2010
+// Please update this information when this file changes
+
+// Implementation of class OrderAccess.
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+inline void compiler_barrier() {
+  _ReadWriteBarrier();
+}
+
+// Note that in MSVC, volatile memory accesses are explicitly
+// guaranteed to have acquire release semantics (w.r.t. compiler
+// reordering) and therefore does not even need a compiler barrier
+// for normal acquire release accesses. And all generalized
+// bound calls like release_store go through OrderAccess::load
+// and OrderAccess::store which do volatile memory accesses.
+template<> inline void ScopedFence<X_ACQUIRE>::postfix() { }
+template<> inline void ScopedFence<RELEASE_X>::prefix() { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix() { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
+
+inline void OrderAccess::loadload() { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore() { compiler_barrier(); }
+inline void OrderAccess::storeload() { fence(); }
+
+inline void OrderAccess::acquire() { compiler_barrier(); }
+inline void OrderAccess::release() { compiler_barrier(); }
+
+inline void OrderAccess::fence() {
+#ifdef AMD64
+  StubRoutines_fence();
+#else
+  __asm {
+    lock add dword ptr [esp], 0;
+  }
+#endif // AMD64
+  compiler_barrier();
+}
+
+#ifndef AMD64
+template<>
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov al, v;
+      xchg al, byte ptr [edx];
+    }
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov ax, v;
+      xchg ax, word ptr [edx];
+    }
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov eax, v;
+      xchg eax, dword ptr [edx];
+    }
+  }
+};
+#endif // AMD64
+
+#endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
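One aside on the 32-bit Windows specializations above (an illustration only, not part of the change): on x86 an xchg with a memory operand is implicitly locked, so a single instruction both stores the value and provides the trailing full fence that release_store_fence requires, which is why no separate barrier follows it. A portable stand-in using std::atomic, with all names invented for the example, might look like this; std::atomic<T>::exchange compiles to that same xchg on x86.

#include <atomic>
#include <thread>

int payload = 0;                 // illustrative data, not HotSpot code
std::atomic<int> ready{0};

void publish(int v) {
  payload = v;                   // ordinary store
  ready.exchange(1);             // xchg on x86: store + full fence in one instruction
}

int consume() {
  while (ready.load(std::memory_order_acquire) == 0) { /* spin */ }
  return payload;
}

int main() {
  std::thread t([] { publish(42); });
  int v = consume();
  t.join();
  return v == 42 ? 0 : 1;
}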
--- old/src/hotspot/share/runtime/orderAccess.inline.hpp 2018-06-04 20:53:45.975445387 -0400
+++ /dev/null 2018-04-28 00:24:55.164000301 -0400
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, 2016 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
-#define SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-#include "utilities/macros.hpp"
-
-#include OS_CPU_HEADER_INLINE(orderAccess)
-
-template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix() { OrderAccess::acquire(); }
-template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix() { OrderAccess::release(); }
-template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix() { OrderAccess::release(); }
-template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
-
-
-template <typename FieldType, ScopedFenceType FenceType>
-inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
-  ScopedFence<FenceType> f((void*)p);
-  Atomic::store(v, p);
-}
-
-template <typename FieldType, ScopedFenceType FenceType>
-inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) {
-  ScopedFence<FenceType> f((void*)p);
-  return Atomic::load(p);
-}
-
-template <typename T>
-inline T OrderAccess::load_acquire(const volatile T* p) {
-  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
-}
-
-template <typename T, typename D>
-inline void OrderAccess::release_store(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
-}
-
-template <typename T, typename D>
-inline void OrderAccess::release_store_fence(volatile D* p, T v) {
-  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
-}
-#endif // SHARE_VM_RUNTIME_ORDERACCESS_INLINE_HPP
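With orderAccess.inline.hpp removed above, the generalized load_acquire/release_store templates live behind runtime/orderAccess.hpp alone, which is what drives the include substitutions earlier in this patch. A hypothetical caller sketch follows (illustration only; the type names are invented and it builds only inside the HotSpot source tree, since it needs the real header): after the change a client includes runtime/orderAccess.hpp directly, with no .inline.hpp counterpart to pull in.

#include "runtime/orderAccess.hpp"

struct Node { Node* volatile _next; int _value; };   // invented example type

class PublishedList {
  Node* volatile _head;
 public:
  PublishedList() : _head(NULL) {}
  // Reader side: acquire ensures the Node's fields are seen fully initialized.
  Node* head_acquire() const { return OrderAccess::load_acquire(&_head); }
  // Writer side: release ensures the Node is built before it becomes visible.
  void publish_head(Node* n) { OrderAccess::release_store(&_head, n); }
};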