src/share/vm/opto/parse3.cpp

Print this page
rev 5661 : 8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering.

*** 226,236 ****
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }
    // Build the load.
!   Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);
  
    // Adjust Java stack
    if (type2size[bt] == 1)
      push(ld);
    else
--- 226,238 ----
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }
    // Build the load.
!   //
!   LoadNode::Sem sem = is_vol ? LoadNode::acquire : LoadNode::unordered;
!   Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol, sem);
  
    // Adjust Java stack
    if (type2size[bt] == 1)
      push(ld);
    else
*** 286,307 ****
    // Value to be stored
    Node* val = type2size[bt] == 1 ? pop() : pop_pair();
    // Round doubles before storing
    if (bt == T_DOUBLE)  val = dstore_rounding(val);
  
    // Store the value.
    Node* store;
    if (bt == T_OBJECT) {
      const TypeOopPtr* field_type;
      if (!field->type()->is_loaded()) {
        field_type = TypeInstPtr::BOTTOM;
      } else {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      }
!     store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
    } else {
!     store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
    }
  
    // If reference is volatile, prevent following volatiles ops from
    // floating up before the volatile write.
    if (is_vol) {
--- 288,319 ----
    // Value to be stored
    Node* val = type2size[bt] == 1 ? pop() : pop_pair();
    // Round doubles before storing
    if (bt == T_DOUBLE)  val = dstore_rounding(val);
  
+   // Conservatively release stores of object references.
+   const StoreNode::Sem sem =
+     is_vol ?
+     // Volatile fields need releasing stores.
+     StoreNode::release :
+     // Non-volatile fields also need releasing stores if they hold an
+     // object reference, because the object reference might point to
+     // a freshly created object.
+     StoreNode::release_if_reference(bt);
+ 
    // Store the value.
    Node* store;
    if (bt == T_OBJECT) {
      const TypeOopPtr* field_type;
      if (!field->type()->is_loaded()) {
        field_type = TypeInstPtr::BOTTOM;
      } else {
        field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
      }
!     store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, sem);
    } else {
!     store = store_to_memory(control(), adr, val, bt, adr_type, is_vol, sem);
    }
  
    // If reference is volatile, prevent following volatiles ops from
    // floating up before the volatile write.
    if (is_vol) {
*** 412,422 ****
      const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
      for (jint i = 0; i < length_con; i++) {
        Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
        intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
        Node*    eaddr  = basic_plus_adr(array, offset);
!       store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
      }
    }
  
    return array;
  }
--- 424,434 ----
      const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
      for (jint i = 0; i < length_con; i++) {
        Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
        intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
        Node*    eaddr  = basic_plus_adr(array, offset);
!       store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, StoreNode::unordered);
      }
    }
  
    return array;
  }
*** 501,511 ****
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);
  
      // Fill-in it with values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
!       store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS);
      }
    }
  
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),
--- 513,523 ----
      dims = new_array(dims_array_klass, intcon(ndimensions), 0);
  
      // Fill-in it with values
      for (j = 0; j < ndimensions; j++) {
        Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
!       store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, false, StoreNode::unordered);
      }
    }
  
    c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                          OptoRuntime::multianewarrayN_Type(),