src/share/vm/opto/parse3.cpp


*** 231,241 ****
      insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
    }
    // Build the load.
    //
    MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
!   Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
  
    // Adjust Java stack
    if (type2size[bt] == 1)
      push(ld);
    else
--- 231,242 ----
      insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
    }
    // Build the load.
    //
    MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
!   bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
!   Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, needs_atomic_access);
  
    // Adjust Java stack
    if (type2size[bt] == 1)
      push(ld);
    else
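For context on the new last argument: this is the require_atomic_access flag of GraphKit::make_load, and the hunk widens it from is_vol alone to is_vol || AlwaysAtomicAccesses, so the experimental -XX:+AlwaysAtomicAccesses flag can force even non-volatile long/double loads to be emitted as atomic (plain 64-bit Java accesses are otherwise allowed to tear, per JLS 17.7). Below is a minimal standalone C++ sketch of the tearing problem the flag guards against; it is an illustration only, not HotSpot code, and all names in it are made up:

    // Illustration only (not HotSpot code): a plain 64-bit store may be
    // split into two 32-bit stores on some platforms, so a concurrent
    // reader can observe half of an update. Requesting an atomic access
    // rules that out.
    #include <atomic>
    #include <cstdint>

    struct Fields {
      int64_t              plain;    // may tear where 64-bit moves are not atomic
      std::atomic<int64_t> atomic;   // never tears
    };

    void writer(Fields& f, int64_t v) {
      f.plain = v;                                    // one store, or possibly two
      f.atomic.store(v, std::memory_order_relaxed);   // always one atomic store
    }

    int64_t reader(const Fields& f) {
      return f.atomic.load(std::memory_order_relaxed);  // sees whole values only
    }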
*** 312,322 ****
      } else {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      }
      store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
    } else {
!     store = store_to_memory(control(), adr, val, bt, adr_type, mo, is_vol);
    }
  
    // If reference is volatile, prevent following volatiles ops from
    // floating up before the volatile write.
    if (is_vol) {
--- 313,324 ----
      } else {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      }
      store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
    } else {
!     bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
!     store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
    }
  
    // If reference is volatile, prevent following volatiles ops from
    // floating up before the volatile write.
    if (is_vol) {
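The store path makes the mirror-image change, passing the same predicate to store_to_memory. Since both hunks compute is_vol || AlwaysAtomicAccesses independently, one could imagine factoring it out; the helper below is hypothetical and not part of the patch, shown only to make the shared condition explicit:

    // Hypothetical helper, not in the patch: the load and store paths
    // ask the same question, so it could live in one place.
    static inline bool field_needs_atomic_access(bool is_vol) {
      // Volatile accesses must always be atomic; the experimental
      // AlwaysAtomicAccesses flag extends the requirement to every
      // field access.
      return is_vol || AlwaysAtomicAccesses;
    }

Keeping a local in each method, as the patch does, is also reasonable: each hunk stays self-contained and no shared header needs to change.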