--- old/src/share/vm/opto/parse3.cpp	2021-01-25 19:30:48.552970045 +0000
+++ new/src/share/vm/opto/parse3.cpp	2021-01-25 19:30:48.436968826 +0000
@@ -196,6 +196,7 @@
     }
   }
 
+  Node* leading_membar = NULL;
   ciType* field_klass = field->type();
   bool is_vol = field->is_volatile();
 
@@ -228,7 +229,7 @@
     type = Type::get_const_basic_type(bt);
   }
   if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
-    insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
+    leading_membar = insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
   }
   // Build the load.
   //
@@ -272,16 +273,21 @@
   // another volatile read.
   if (field->is_volatile()) {
     // Memory barrier includes bogus read of value to force load BEFORE membar
-    insert_mem_bar(Op_MemBarAcquire, ld);
+    assert(leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
+    Node* mb = insert_mem_bar(Op_MemBarAcquire, ld);
+    mb->as_MemBar()->set_trailing_load();
   }
 }
 
 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
+  Node* leading_membar = NULL;
   bool is_vol = field->is_volatile();
   // If reference is volatile, prevent following memory ops from
   // floating down past the volatile write.  Also prevents commoning
   // another volatile read.
-  if (is_vol)  insert_mem_bar(Op_MemBarRelease);
+  if (is_vol) {
+    leading_membar = insert_mem_bar(Op_MemBarRelease);
+  }
 
   // Compute address and memory type.
   int offset = field->offset_in_bytes();
@@ -322,7 +328,8 @@
   if (is_vol) {
     // If not multiple copy atomic, we do the MemBarVolatile before the load.
     if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
-      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
+      Node* mb = insert_mem_bar(Op_MemBarVolatile, store); // Use fat membar
+      MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
     }
     // Remember we wrote a volatile field.
     // For not multiple copy atomic cpu (ppc64) a barrier should be issued