src/share/vm/opto/library_call.cpp

rev 5661 : 8024921: PPC64 (part 113): Extend Load and Store nodes to know about memory ordering.

*** 1055,1065 ****
  Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
    ciKlass* thread_klass = env()->Thread_klass();
    const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
    Node* thread = _gvn.transform(new (C) ThreadLocalNode());
    Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
!   Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT);
    tls_output = thread;
    return threadObj;
  }
--- 1055,1065 ----
  Node* LibraryCallKit::generate_current_thread(Node* &tls_output) {
    ciKlass* thread_klass = env()->Thread_klass();
    const Type* thread_type = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
    Node* thread = _gvn.transform(new (C) ThreadLocalNode());
    Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
!   Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, false, LoadNode::unordered);
    tls_output = thread;
    return threadObj;
  }
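Note on the pattern repeated throughout this change: each make_load / store_to_memory call site now passes the previously defaulted boolean explicitly so that a new trailing memory-ordering argument can be supplied. The enum declarations themselves live in memnode.hpp, which this page does not show; a minimal standalone sketch of the shape the call sites imply (any name not visible on this page, such as LoadNode::acquire, is an assumption):

    // Sketch only -- the real declarations are in memnode.hpp, not shown in
    // this webrev. Shape inferred from the call sites in this file.
    struct LoadNode {
      // unordered: plain load; acquire (assumed): later accesses may not
      // float above the load.
      enum Sem { unordered, acquire };
    };
    struct StoreNode {
      // unordered: plain store; release: earlier accesses may not float
      // below the store.
      enum Sem { unordered, release };
    };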
*** 2638,2648 ****
    // of safe & unsafe memory. Otherwise fails in a CTW of rt.jar
    // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
    if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
  
    if (!is_store) {
!     Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile);
      // load value
      switch (type) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
--- 2638,2648 ----
    // of safe & unsafe memory. Otherwise fails in a CTW of rt.jar
    // around 5701, class sun/reflect/UnsafeBooleanFieldAccessorImpl.
    if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
  
    if (!is_store) {
!     Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile, LoadNode::unordered);
      // load value
      switch (type) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
*** 2682,2698 ****
      val = ConvL2X(val);
      val = _gvn.transform(new (C) CastX2PNode(val));
      break;
    }
  
    if (type != T_OBJECT ) {
!     (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
    } else {
      // Possibly an oop being stored to Java heap or native memory
      if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
        // oop to Java heap.
!       (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
      } else {
        // We can't tell at compile time if we are storing in the Java heap or outside
        // of it. So we need to emit code to conditionally do the proper type of
        // store.
--- 2682,2699 ----
      val = ConvL2X(val);
      val = _gvn.transform(new (C) CastX2PNode(val));
      break;
    }
  
+   StoreNode::Sem sem = is_volatile ? StoreNode::release : StoreNode::unordered;
    if (type != T_OBJECT ) {
!     (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile, sem);
    } else {
      // Possibly an oop being stored to Java heap or native memory
      if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
        // oop to Java heap.
!       (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, sem);
      } else {
        // We can't tell at compile time if we are storing in the Java heap or outside
        // of it. So we need to emit code to conditionally do the proper type of
        // store.
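The rule this hunk introduces, sem = is_volatile ? release : unordered, is what all the store paths below consume. In C++11 terms a Java volatile store is a release store: no earlier write may be reordered past it. A standalone analogy using plain std::atomic (illustrative only, not HotSpot code):

    #include <atomic>

    std::atomic<int> guard{0};
    int payload = 0;

    void publish(bool is_volatile) {
      payload = 42;  // earlier write that must stay above a volatile store
      guard.store(1, is_volatile ? std::memory_order_release
                                 : std::memory_order_relaxed);
    }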
*** 2700,2714 ****
  #define __ ideal.
        // QQQ who knows what probability is here??
        __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
          // Sync IdealKit and graphKit.
          sync_kit(ideal);
!         Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
          // Update IdealKit memory.
          __ sync_kit(this);
        } __ else_(); {
!         __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile);
        } __ end_if();
        // Final sync IdealKit and GraphKit.
        final_sync(ideal);
  #undef __
  }
--- 2701,2715 ----
  #define __ ideal.
        // QQQ who knows what probability is here??
        __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
          // Sync IdealKit and graphKit.
          sync_kit(ideal);
!         Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, sem);
          // Update IdealKit memory.
          __ sync_kit(this);
        } __ else_(); {
!         __ store(__ ctrl(), adr, val, type, alias_type->index(), false, sem);
        } __ end_if();
        // Final sync IdealKit and GraphKit.
        final_sync(ideal);
  #undef __
  }
*** 3026,3036 ****
--- 3027,3043 ----
      }
    }
  
    // Add the trailing membar surrounding the access
    insert_mem_bar(Op_MemBarCPUOrder);
+   // On power we need a fence to prevent succeeding loads from floating
+   // above the store of the compare-exchange.
+ #ifdef PPC64
+   insert_mem_bar(Op_MemBarVolatile);
+ #else
    insert_mem_bar(Op_MemBarAcquire);
+ #endif
  
    assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
    set_result(load_store);
    return true;
  }
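Why MemBarAcquire is not enough here: the compare-exchange ends in a store, and acquire-style barriers on POWER (lwsync) do not order that store against later loads; only the full sync that MemBarVolatile emits closes the store->load window the new comment describes. A standalone illustration of the hazard, using C++11 atomics as an analogy (not HotSpot code):

    #include <atomic>

    std::atomic<int> x{0}, y{0};

    int t1() {
      x.store(1, std::memory_order_release);     // like the store half of the CAS
      return y.load(std::memory_order_acquire);  // may complete before the store is visible
    }
    int t2() {
      y.store(1, std::memory_order_release);
      return x.load(std::memory_order_acquire);
    }
    // Without a full fence (PPC 'sync', C2's MemBarVolatile) between each
    // store and load, t1() and t2() may both return 0.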
*** 3088,3115 ****
    insert_mem_bar(Op_MemBarCPUOrder);
    // Ensure that the store is atomic for longs:
    const bool require_atomic_access = true;
    Node* store;
    if (type == T_OBJECT) // reference stores need a store barrier.
!     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type);
    else {
!     store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access);
    }
    insert_mem_bar(Op_MemBarCPUOrder);
    return true;
  }
  
  bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
    // Regardless of form, don't allow previous ld/st to move down,
    // then issue acquire, release, or volatile mem_bar.
    insert_mem_bar(Op_MemBarCPUOrder);
    switch(id) {
      case vmIntrinsics::_loadFence:
        insert_mem_bar(Op_MemBarAcquire);
        return true;
      case vmIntrinsics::_storeFence:
        insert_mem_bar(Op_MemBarRelease);
        return true;
      case vmIntrinsics::_fullFence:
        insert_mem_bar(Op_MemBarVolatile);
        return true;
      default:
--- 3095,3138 ----
    insert_mem_bar(Op_MemBarCPUOrder);
    // Ensure that the store is atomic for longs:
    const bool require_atomic_access = true;
    Node* store;
    if (type == T_OBJECT) // reference stores need a store barrier.
!     store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, StoreNode::release);
    else {
!     store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access, StoreNode::release);
    }
    insert_mem_bar(Op_MemBarCPUOrder);
    return true;
  }
  
  bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
    // Regardless of form, don't allow previous ld/st to move down,
    // then issue acquire, release, or volatile mem_bar.
+ #if !defined PPC64 && !defined(IA64)
    insert_mem_bar(Op_MemBarCPUOrder);
+ #endif
    switch(id) {
      case vmIntrinsics::_loadFence:
+       // On PPC and IA64 MemBarAcquire is implemented empty, as the acquire
+       // is issued together with the Load instructions. On IA64, MemBarRelease
+       // is empty for the same reason.
+ #ifdef PPC64
+       insert_mem_bar(Op_MemBarRelease);
+       insert_mem_bar(Op_MemBarCPUOrder);
+ #elif defined(IA64)
+       insert_mem_bar(Op_MemBarVolatile);
+ #else
        insert_mem_bar(Op_MemBarAcquire);
+ #endif
        return true;
      case vmIntrinsics::_storeFence:
+ #ifndef IA64
        insert_mem_bar(Op_MemBarRelease);
+ #else
+       insert_mem_bar(Op_MemBarVolatile);
+ #endif
        return true;
      case vmIntrinsics::_fullFence:
        insert_mem_bar(Op_MemBarVolatile);
        return true;
      default:
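For reference, the three Unsafe fence intrinsics dispatched above map onto the standard fence flavors; a rough C++11 analogy (illustrative only, not a claim about the exact PPC64 or IA64 instructions emitted):

    #include <atomic>

    void load_fence()  { std::atomic_thread_fence(std::memory_order_acquire); }  // _loadFence
    void store_fence() { std::atomic_thread_fence(std::memory_order_release); }  // _storeFence
    void full_fence()  { std::atomic_thread_fence(std::memory_order_seq_cst); }  // _fullFence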
*** 3150,3160 ****
      // Serializable.class or Object[].class. The runtime will handle it.
      // But we must make an explicit check for initialization.
      Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
      // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
      // can generate code to load it as unsigned byte.
!     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
      Node* bits = intcon(InstanceKlass::fully_initialized);
      test = _gvn.transform(new (C) SubINode(inst, bits));
      // The 'test' is non-zero if we need to take a slow path.
    }
--- 3173,3183 ----
      // Serializable.class or Object[].class. The runtime will handle it.
      // But we must make an explicit check for initialization.
      Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
      // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
      // can generate code to load it as unsigned byte.
!     Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, false, LoadNode::unordered);
      Node* bits = intcon(InstanceKlass::fully_initialized);
      test = _gvn.transform(new (C) SubINode(inst, bits));
      // The 'test' is non-zero if we need to take a slow path.
    }
*** 3273,3287 ****
    generate_slow_guard(bol_thr, slow_region);
  
    // (b) Interrupt bit on TLS must be false.
    Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
!   Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
    p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
  
    // Set the control input on the field _interrupted read to prevent it floating up.
!   Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
    Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
    Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));
  
    IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
--- 3296,3310 ----
    generate_slow_guard(bol_thr, slow_region);
  
    // (b) Interrupt bit on TLS must be false.
    Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
!   Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, false, LoadNode::unordered);
    p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
  
    // Set the control input on the field _interrupted read to prevent it floating up.
!   Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, false, LoadNode::unordered);
    Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
    Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));
  
    IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
*** 3345,3355 ****
  //---------------------------load_mirror_from_klass----------------------------
  // Given a klass oop, load its java mirror (a java.lang.Class oop).
  Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
    Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
!   return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
  }
  
  //-----------------------load_klass_from_mirror_common-------------------------
  // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
  // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
--- 3368,3378 ----
  //---------------------------load_mirror_from_klass----------------------------
  // Given a klass oop, load its java mirror (a java.lang.Class oop).
  Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
    Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
!   return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, false, LoadNode::unordered);
  }
  
  //-----------------------load_klass_from_mirror_common-------------------------
  // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop.
  // Test the klass oop for null (signifying a primitive Class like Integer.TYPE),
*** 3382,3392 ****
  // Fall through if (mods & mask) == bits, take the guard otherwise.
  Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
    // Branch around if the given klass has the given modifier bit set.
    // Like generate_guard, adds a new path onto the region.
    Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
!   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
    Node* mask = intcon(modifier_mask);
    Node* bits = intcon(modifier_bits);
    Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
    Node* cmp = _gvn.transform(new (C) CmpINode(mbit, bits));
    Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
--- 3405,3415 ----
  // Fall through if (mods & mask) == bits, take the guard otherwise.
  Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
    // Branch around if the given klass has the given modifier bit set.
    // Like generate_guard, adds a new path onto the region.
    Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
!   Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, false, LoadNode::unordered);
    Node* mask = intcon(modifier_mask);
    Node* bits = intcon(modifier_bits);
    Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
    Node* cmp = _gvn.transform(new (C) CmpINode(mbit, bits));
    Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne));
*** 3499,3509 ****
      query_value = gen_instanceof(obj, kls, safe_for_replace);
      break;
  
    case vmIntrinsics::_getModifiers:
      p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
!     query_value = make_load(NULL, p, TypeInt::INT, T_INT);
      break;
  
    case vmIntrinsics::_isInterface:
      // (To verify this code sequence, check the asserts in JVM_IsInterface.)
      if (generate_interface_guard(kls, region) != NULL)
--- 3522,3532 ----
      query_value = gen_instanceof(obj, kls, safe_for_replace);
      break;
  
    case vmIntrinsics::_getModifiers:
      p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
!     query_value = make_load(NULL, p, TypeInt::INT, T_INT, false, LoadNode::unordered);
      break;
  
    case vmIntrinsics::_isInterface:
      // (To verify this code sequence, check the asserts in JVM_IsInterface.)
      if (generate_interface_guard(kls, region) != NULL)
*** 3557,3575 ****
    case vmIntrinsics::_getComponentType:
      if (generate_array_guard(kls, region) != NULL) {
        // Be sure to pin the oop load to the guard edge just created:
        Node* is_array_ctrl = region->in(region->req()-1);
        Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
!       Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT);
        phi->add_req(cmo);
      }
      query_value = null(); // non-array case is null
      break;
  
    case vmIntrinsics::_getClassAccessFlags:
      p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
!     query_value = make_load(NULL, p, TypeInt::INT, T_INT);
      break;
  
    default:
      fatal_unexpected_iid(id);
      break;
--- 3580,3598 ----
    case vmIntrinsics::_getComponentType:
      if (generate_array_guard(kls, region) != NULL) {
        // Be sure to pin the oop load to the guard edge just created:
        Node* is_array_ctrl = region->in(region->req()-1);
        Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
!       Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT, false, LoadNode::unordered);
        phi->add_req(cmo);
      }
      query_value = null(); // non-array case is null
      break;
  
    case vmIntrinsics::_getClassAccessFlags:
      p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
!     query_value = make_load(NULL, p, TypeInt::INT, T_INT, false, LoadNode::unordered);
      break;
  
    default:
      fatal_unexpected_iid(id);
      break;
*** 3931,3941 ****
    // Get the Method* out of the appropriate vtable entry.
    int entry_offset = (InstanceKlass::vtable_start_offset() +
                        vtable_index*vtableEntry::size()) * wordSize +
                        vtableEntry::method_offset_in_bytes();
    Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
!   Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS);
  
    // Compare the target method with the expected method (e.g., Object.hashCode).
    const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
    Node* native_call = makecon(native_call_addr);
--- 3954,3964 ----
    // Get the Method* out of the appropriate vtable entry.
    int entry_offset = (InstanceKlass::vtable_start_offset() +
                        vtable_index*vtableEntry::size()) * wordSize +
                        vtableEntry::method_offset_in_bytes();
    Node* entry_addr = basic_plus_adr(obj_klass, entry_offset);
!   Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, false, LoadNode::unordered);
  
    // Compare the target method with the expected method (e.g., Object.hashCode).
    const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
    Node* native_call = makecon(native_call_addr);
*** 4057,4067 ****
      generate_virtual_guard(obj_klass, slow_region);
    }
  
    // Get the header out of the object, use LoadMarkNode when available
    Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
!   Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type());
  
    // Test the header to see if it is unlocked.
    Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
    Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask));
    Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
--- 4080,4090 ----
      generate_virtual_guard(obj_klass, slow_region);
    }
  
    // Get the header out of the object, use LoadMarkNode when available
    Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
!   Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type(), false, LoadNode::unordered);
  
    // Test the header to see if it is unlocked.
    Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
    Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask));
    Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value);
*** 5478,5488 ****
      start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear)));
      if (bump_bit != 0) {
        // Store a zero to the immediately preceding jint:
        Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
        Node* p1 = basic_plus_adr(dest, x1);
!       mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
        mem = _gvn.transform(mem);
      }
    }
    Node* end = dest_size; // pre-rounded
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
--- 5501,5511 ----
      start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear)));
      if (bump_bit != 0) {
        // Store a zero to the immediately preceding jint:
        Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
        Node* p1 = basic_plus_adr(dest, x1);
!       mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT, StoreNode::unordered);
        mem = _gvn.transform(mem);
      }
    }
    Node* end = dest_size; // pre-rounded
    mem = ClearArrayNode::clear_memory(control(), mem, dest,
*** 5528,5539 ****
    // This is a common case, since abase can be odd mod 8.
    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
      Node* sptr = basic_plus_adr(src, src_off);
      Node* dptr = basic_plus_adr(dest, dest_off);
!     Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type);
!     store_to_memory(control(), dptr, sval, T_INT, adr_type);
      src_off += BytesPerInt;
      dest_off += BytesPerInt;
    } else {
      return false;
    }
--- 5551,5562 ----
    // This is a common case, since abase can be odd mod 8.
    if (((src_off | dest_off) & (BytesPerLong-1)) == BytesPerInt &&
        ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
      Node* sptr = basic_plus_adr(src, src_off);
      Node* dptr = basic_plus_adr(dest, dest_off);
!     Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type, false, LoadNode::unordered);
!     store_to_memory(control(), dptr, sval, T_INT, adr_type, false, StoreNode::unordered);
      src_off += BytesPerInt;
      dest_off += BytesPerInt;
    } else {
      return false;
    }
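The unordered int load/store pair in this hunk implements an alignment fix-up: when source and destination offsets are both 4 mod 8, one leading jint is copied so the bulk of the copy runs long-aligned. A standalone sketch of the same trick (hypothetical helper in plain C++, standing in for the Ideal-graph nodes):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void copy_with_leading_int(char* src, size_t src_off,
                               char* dest, size_t dest_off, size_t size) {
      if (((src_off | dest_off) & 7) == 4 &&  // both offsets are 4 mod 8 ...
          ((src_off ^ dest_off) & 7) == 0) {  // ... i.e. they agree mod 8
        int32_t v;
        memcpy(&v, src + src_off, sizeof v);    // the make_load(..., T_INT, ...)
        memcpy(dest + dest_off, &v, sizeof v);  // the store_to_memory(..., T_INT, ...)
        src_off += 4; dest_off += 4; size -= 4;
      }
      memcpy(dest + dest_off, src + src_off, size);  // remainder is long-aligned
    }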
*** 5594,5604 ****
    // for the target array. This is an optimistic check. It will
    // look in each non-null element's class, at the desired klass's
    // super_check_offset, for the desired klass.
    int sco_offset = in_bytes(Klass::super_check_offset_offset());
    Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
!   Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
    Node* check_offset = ConvI2X(_gvn.transform(n3));
    Node* check_value = dest_elem_klass;
  
    Node* src_start = array_element_address(src, src_offset, T_OBJECT);
    Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
--- 5617,5627 ----
    // for the target array. This is an optimistic check. It will
    // look in each non-null element's class, at the desired klass's
    // super_check_offset, for the desired klass.
    int sco_offset = in_bytes(Klass::super_check_offset_offset());
    Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
!   Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr(), TypeInt::INT, LoadNode::unordered);
    Node* check_offset = ConvI2X(_gvn.transform(n3));
    Node* check_value = dest_elem_klass;
  
    Node* src_start = array_element_address(src, src_offset, T_OBJECT);
    Node* dest_start = array_element_address(dest, dest_offset, T_OBJECT);
*** 5735,5745 ****
    result = _gvn.transform(new (C) AndINode(result, intcon(0xFF)));
    Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
    Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
    Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
!   result = make_load(control(), adr, TypeInt::INT, T_INT);
    crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
    result = _gvn.transform(new (C) XorINode(crc, result));
    result = _gvn.transform(new (C) XorINode(result, M1));
    set_result(result);
--- 5758,5768 ----
    result = _gvn.transform(new (C) AndINode(result, intcon(0xFF)));
    Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
    Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
    Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
!   result = make_load(control(), adr, TypeInt::INT, T_INT, false, LoadNode::unordered);
    crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
    result = _gvn.transform(new (C) XorINode(crc, result));
    result = _gvn.transform(new (C) XorINode(result, M1));
    set_result(result);
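The node sequence above is one step of table-driven CRC-32 (the inversion of the incoming crc and the XOR with the input byte sit just before this hunk, which is why the first visible step is the 0xFF mask). A standalone sketch of the same computation; table_entry here computes on the fly what the intrinsic loads from the precomputed 256-entry table at StubRoutines::crc_table_addr():

    #include <cstdint>

    // zlib-style CRC-32 table entry; the intrinsic instead loads the value
    // from the table at base + (index << 2).
    static uint32_t table_entry(uint32_t i) {
      for (int k = 0; k < 8; k++)
        i = (i & 1) ? 0xEDB88320u ^ (i >> 1) : i >> 1;
      return i;
    }

    uint32_t crc32_update(uint32_t crc, uint8_t b) {
      crc ^= ~0u;                                        // XorINode with M1
      crc = table_entry((crc ^ b) & 0xFF) ^ (crc >> 8);  // masked index, table load, URShiftINode
      return crc ^ ~0u;                                  // trailing XorINode with M1
    }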
*** 5836,5846 ****
    ciInstanceKlass* klass = env()->Object_klass();
    const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
  
    Node* no_ctrl = NULL;
!   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT);
  
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(false /* do_load */,
                control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
--- 5859,5869 ----
    ciInstanceKlass* klass = env()->Object_klass();
    const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
  
    Node* no_ctrl = NULL;
!   Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, false, LoadNode::unordered);
  
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(false /* do_load */,
                control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
*** 5883,5893 ****
    // Build the resultant type of the load
    const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass());
  
    // Build the load.
!   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, is_vol);
    return loadedField;
  }
  
  //------------------------------inline_aescrypt_Block-----------------------
--- 5906,5916 ----
    // Build the resultant type of the load
    const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass());
  
    // Build the load.
!   Node* loadedField = make_load(NULL, adr, type, bt, adr_type, is_vol, LoadNode::unordered);
    return loadedField;
  }
  
  //------------------------------inline_aescrypt_Block-----------------------