src/share/vm/opto/library_call.cpp

*** 2710,2733 ****
  // Heap pointers get a null-check from the interpreter,
  // as a courtesy. However, this is not guaranteed by Unsafe,
  // and it is not possible to fully distinguish unintended nulls
  // from intended ones in this API.

  if (is_volatile) {
    // We need to emit leading and trailing CPU membars (see below) in
    // addition to memory membars when is_volatile. This is a little
    // too strong, but avoids the need to insert per-alias-type
    // volatile membars (for stores; compare Parse::do_put_xxx), which
    // we cannot do effectively here because we probably only have a
    // rough approximation of type.
    need_mem_bar = true;
    // For Stores, place a memory ordering barrier now.
    if (is_store) {
!     insert_mem_bar(Op_MemBarRelease);
    } else {
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
!       insert_mem_bar(Op_MemBarVolatile);
      }
    }
  }

  // Memory barrier to prevent normal and 'unsafe' accesses from
--- 2710,2736 ----
  // Heap pointers get a null-check from the interpreter,
  // as a courtesy. However, this is not guaranteed by Unsafe,
  // and it is not possible to fully distinguish unintended nulls
  // from intended ones in this API.

+ Node* load = NULL;
+ Node* store = NULL;
+ Node* leading_membar = NULL;
  if (is_volatile) {
    // We need to emit leading and trailing CPU membars (see below) in
    // addition to memory membars when is_volatile. This is a little
    // too strong, but avoids the need to insert per-alias-type
    // volatile membars (for stores; compare Parse::do_put_xxx), which
    // we cannot do effectively here because we probably only have a
    // rough approximation of type.
    need_mem_bar = true;
    // For Stores, place a memory ordering barrier now.
    if (is_store) {
!     leading_membar = insert_mem_bar(Op_MemBarRelease);
    } else {
      if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
!       leading_membar = insert_mem_bar(Op_MemBarVolatile);
      }
    }
  }

  // Memory barrier to prevent normal and 'unsafe' accesses from

*** 2740,2750 ****
  if (!is_store) {
    MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
    // To be valid, unsafe loads may depend on other conditions than
    // the one that guards them: pin the Load node
!   Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
    // load value
    switch (type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
--- 2743,2753 ----
  if (!is_store) {
    MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
    // To be valid, unsafe loads may depend on other conditions than
    // the one that guards them: pin the Load node
!   load = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
    // load value
    switch (type) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:

*** 2754,2780 ****
    case T_FLOAT:
    case T_DOUBLE:
      break;
    case T_OBJECT:
      if (need_read_barrier) {
!       insert_pre_barrier(heap_base_oop, offset, p, !(is_volatile || need_mem_bar));
      }
      break;
    case T_ADDRESS:
      // Cast to an int type.
!     p = _gvn.transform(new (C) CastP2XNode(NULL, p));
!     p = ConvX2UL(p);
      break;
    default:
      fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
      break;
    }
    // The load node has the control of the preceding MemBarCPUOrder. All
    // following nodes will have the control of the MemBarCPUOrder inserted at
    // the end of this method. So, pushing the load onto the stack at a later
    // point is fine.
!   set_result(p);
  } else {
    // place effect of store into memory
    switch (type) {
    case T_DOUBLE:
      val = dstore_rounding(val);
--- 2757,2783 ----
    case T_FLOAT:
    case T_DOUBLE:
      break;
    case T_OBJECT:
      if (need_read_barrier) {
!       insert_pre_barrier(heap_base_oop, offset, load, !(is_volatile || need_mem_bar));
      }
      break;
    case T_ADDRESS:
      // Cast to an int type.
!     load = _gvn.transform(new (C) CastP2XNode(NULL, load));
!     load = ConvX2UL(load);
      break;
    default:
      fatal(err_msg_res("unexpected type %d: %s", type, type2name(type)));
      break;
    }
    // The load node has the control of the preceding MemBarCPUOrder. All
    // following nodes will have the control of the MemBarCPUOrder inserted at
    // the end of this method. So, pushing the load onto the stack at a later
    // point is fine.
!   set_result(load);
  } else {
    // place effect of store into memory
    switch (type) {
    case T_DOUBLE:
      val = dstore_rounding(val);

*** 2786,2807 ****
      break;
    }

    MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
    if (type == T_OBJECT ) {
!     (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
    } else {
!     (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
    }
  }

  if (is_volatile) {
    if (!is_store) {
!     insert_mem_bar(Op_MemBarAcquire);
    } else {
      if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
!       insert_mem_bar(Op_MemBarVolatile);
      }
    }
  }

  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
--- 2789,2812 ----
      break;
    }

    MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
    if (type == T_OBJECT ) {
!     store = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
    } else {
!     store = store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
    }
  }

  if (is_volatile) {
    if (!is_store) {
!     Node* mb = insert_mem_bar(Op_MemBarAcquire, load);
!     mb->as_MemBar()->set_trailing_load();
    } else {
      if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
!       Node* mb = insert_mem_bar(Op_MemBarVolatile, store);
!       MemBarNode::set_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());
      }
    }
  }

  if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);

*** 2997,3007 ****
  // Memory-model-wise, a LoadStore acts like a little synchronized
  // block, so needs barriers on each side. These don't translate
  // into actual barriers on most machines, but we still need rest of
  // compiler to respect ordering.

! insert_mem_bar(Op_MemBarRelease);
  insert_mem_bar(Op_MemBarCPUOrder);

  // 4984716: MemBars must be inserted before this
  // memory node in order to avoid a false
  // dependency which will confuse the scheduler.
--- 3002,3012 ----
  // Memory-model-wise, a LoadStore acts like a little synchronized
  // block, so needs barriers on each side. These don't translate
  // into actual barriers on most machines, but we still need rest of
  // compiler to respect ordering.

! Node* leading_membar = insert_mem_bar(Op_MemBarRelease);
  insert_mem_bar(Op_MemBarCPUOrder);

  // 4984716: MemBars must be inserted before this
  // memory node in order to avoid a false
  // dependency which will confuse the scheduler.

*** 3096,3105 ****
--- 3101,3112 ----
  // main role is to prevent LoadStore nodes from being optimized away
  // when their results aren't used.
  Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
  set_memory(proj, alias_idx);

+ Node* access = load_store;
+
  if (type == T_OBJECT && kind == LS_xchg) {
#ifdef _LP64
    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
    }

*** 3115,3125 ****
      }
    }

    // Add the trailing membar surrounding the access
    insert_mem_bar(Op_MemBarCPUOrder);
!   insert_mem_bar(Op_MemBarAcquire);

    assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
    set_result(load_store);
    return true;
  }
--- 3122,3133 ----
      }
    }

    // Add the trailing membar surrounding the access
    insert_mem_bar(Op_MemBarCPUOrder);
!   Node* mb = insert_mem_bar(Op_MemBarAcquire, access);
!   MemBarNode::set_load_store_pair(leading_membar->as_MemBar(), mb->as_MemBar());

    assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
    set_result(load_store);
    return true;
  }

*** 6355,6376 ****
    type = TypeOopPtr::make_from_klass(field_klass->as_klass());
  } else {
    type = Type::get_const_basic_type(bt);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
!   insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read. Also prevents commoning
  // another volatile read.
  if (is_vol) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
!   insert_mem_bar(Op_MemBarAcquire, loadedField);
  }
  return loadedField;
}
--- 6363,6386 ----
    type = TypeOopPtr::make_from_klass(field_klass->as_klass());
  } else {
    type = Type::get_const_basic_type(bt);
  }

+ Node* leading_membar = NULL;
  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_vol) {
!   leading_membar = insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
  }
  // Build the load.
  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, is_vol);

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read. Also prevents commoning
  // another volatile read.
  if (is_vol) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
!   Node* mb = insert_mem_bar(Op_MemBarAcquire, loadedField);
!   mb->as_MemBar()->set_trailing_load();
  }
  return loadedField;
}