
src/share/vm/opto/parse3.cpp


        

*** 194,203 **** --- 194,204 ----
          }
        }
      }
    }
  
+   Node* leading_membar = NULL;
    ciType* field_klass = field->type();
    bool is_vol = field->is_volatile();
  
    // Compute address and memory type.
    int offset = field->offset_in_bytes();
*** 226,236 ****
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }
    if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
!     insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
    }
    // Build the load.
    //
    MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
    Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
--- 227,237 ----
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }
    if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
!     leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
    }
    // Build the load.
    //
    MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
    Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);
*** 270,289 ****
    // If reference is volatile, prevent following memory ops from
    // floating up past the volatile read.  Also prevents commoning
    // another volatile read.
    if (field->is_volatile()) {
      // Memory barrier includes bogus read of value to force load BEFORE membar
!     insert_mem_bar(Op_MemBarAcquire, ld);
    }
  }
  
  void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
    bool is_vol = field->is_volatile();
    // If reference is volatile, prevent following memory ops from
    // floating down past the volatile write.  Also prevents commoning
    // another volatile read.
!   if (is_vol)  insert_mem_bar(Op_MemBarRelease);
  
    // Compute address and memory type.
    int offset = field->offset_in_bytes();
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    Node* adr = basic_plus_adr(obj, obj, offset);
--- 271,295 ----
    // If reference is volatile, prevent following memory ops from
    // floating up past the volatile read.  Also prevents commoning
    // another volatile read.
    if (field->is_volatile()) {
      // Memory barrier includes bogus read of value to force load BEFORE membar
!     assert(leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
!     Node* mb = insert_mem_bar(Op_MemBarAcquire, ld);
!     mb->as_MemBar()->set_trailing_load();
    }
  }
  
  void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
+   Node* leading_membar = NULL;
    bool is_vol = field->is_volatile();
    // If reference is volatile, prevent following memory ops from
    // floating down past the volatile write.  Also prevents commoning
    // another volatile read.
!   if (is_vol) {
!     leading_membar = insert_mem_bar(Op_MemBarRelease);
!   }
  
    // Compute address and memory type.
    int offset = field->offset_in_bytes();
    const TypePtr* adr_type = C->alias_type(field)->adr_type();
    Node* adr = basic_plus_adr(obj, obj, offset);
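
For orientation, the read path above now establishes this shape around a volatile field load: an optional leading MemBarVolatile (emitted only when support_IRIW_for_not_multiple_copy_atomic_cpu is true), the load itself, and a trailing MemBarAcquire that is now tagged with set_trailing_load(), so that later phases can tell which barriers belong to the same volatile access. A rough standalone sketch of the same ordering using C++11 fences (not HotSpot code; the names g_flag, g_payload and volatile_style_load are invented for illustration):

    #include <atomic>

    std::atomic<int> g_flag{0};
    int g_payload = 0;

    int volatile_style_load() {
      // Leading barrier: only on CPUs that are not multiple-copy atomic
      // (the support_IRIW_for_not_multiple_copy_atomic_cpu case in do_get_xxx).
      std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ MemBarVolatile (leading)
      int f = g_flag.load(std::memory_order_relaxed);       // ~ the volatile LoadNode
      // Trailing barrier: later memory ops may not float above the load.
      std::atomic_thread_fence(std::memory_order_acquire);  // ~ MemBarAcquire (trailing)
      return f + g_payload;
    }
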
*** 320,330 ****
    // If reference is volatile, prevent following volatiles ops from
    // floating up before the volatile write.
    if (is_vol) {
      // If not multiple copy atomic, we do the MemBarVolatile before the load.
      if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
!       insert_mem_bar(Op_MemBarVolatile); // Use fat membar
      }
      // Remember we wrote a volatile field.
      // For not multiple copy atomic cpu (ppc64) a barrier should be issued
      // in constructors which have such stores. See do_exits() in parse1.cpp.
      if (is_field) {
--- 326,337 ----
    // If reference is volatile, prevent following volatiles ops from
    // floating up before the volatile write.
    if (is_vol) {
      // If not multiple copy atomic, we do the MemBarVolatile before the load.
      if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
!       Node* mb = insert_mem_bar(Op_MemBarVolatile, store); // Use fat membar
!       MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
      }
      // Remember we wrote a volatile field.
      // For not multiple copy atomic cpu (ppc64) a barrier should be issued
      // in constructors which have such stores. See do_exits() in parse1.cpp.
      if (is_field) {
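
On the write path the change goes one step further and explicitly pairs the barriers around a volatile field store: the leading MemBarRelease recorded at the top of do_put_xxx and the trailing MemBarVolatile emitted after the store are linked with MemBarNode::set_store_pair(). A rough standalone sketch of the ordering with C++11 fences (again not HotSpot code; names invented for illustration):

    #include <atomic>

    std::atomic<int> g_flag{0};
    int g_payload = 0;

    void volatile_style_store(int v) {
      g_payload = v;                                         // ordinary stores stay before the barrier
      // Leading barrier: earlier memory ops may not float below the store.
      std::atomic_thread_fence(std::memory_order_release);   // ~ MemBarRelease (leading)
      g_flag.store(1, std::memory_order_relaxed);            // ~ the volatile StoreNode
      // Trailing "fat" StoreLoad barrier, emitted here only when the CPU is
      // multiple-copy atomic; otherwise it is issued before volatile loads instead.
      std::atomic_thread_fence(std::memory_order_seq_cst);   // ~ MemBarVolatile (trailing)
    }
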