# HG changeset patch
# User rkennke
# Date 1528215475 -7200
#      Tue Jun 05 18:17:55 2018 +0200
# Node ID 844c936e5a90821e847a212c0c59e72fba62b553
# Parent  7e8c0409a7477082df8e58be4a7511b00796aaec
8200623: Primitive heap access for interpreter BarrierSetAssembler/x86
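Interpreter loads and stores of primitive values (T_BOOLEAN through
T_DOUBLE, plus T_ADDRESS) now funnel through
MacroAssembler::access_load_at/access_store_at instead of raw mov/fld
instructions, so the active BarrierSetAssembler sees every heap access,
not only oop accesses. For context, the dispatching entry point that the
call sites below rely on looks roughly like this sketch (illustrative
only; the decorator fixup step is an assumption here, and the real helper
lives in macroAssembler_x86.cpp):

  void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
                                      Register dst, Address src,
                                      Register tmp1, Register thread_tmp) {
    // Ask the current GC for its assembler-level barrier implementation.
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Assumed: normalize decorators the same way the runtime Access API does.
    decorators = AccessInternal::decorator_fixup(decorators);
    // The barrier set decides how to perform (and possibly wrap) the load.
    bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp);
  }

access_store_at is symmetric. Note that on x86_32 an ltos access carrying
MO_RELAXED keeps the FILD/FIST sequence below, so the 64-bit value is
still read and written atomically.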
diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
@@ -34,6 +34,7 @@
   bool on_heap = (decorators & IN_HEAP) != 0;
   bool on_root = (decorators & IN_ROOT) != 0;
   bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
+  bool atomic = (decorators & MO_RELAXED) != 0;

   switch (type) {
   case T_OBJECT:
@@ -58,6 +59,37 @@
     }
     break;
   }
+  case T_BOOLEAN: __ load_unsigned_byte(dst, src);  break;
+  case T_BYTE:    __ load_signed_byte(dst, src);    break;
+  case T_CHAR:    __ load_unsigned_short(dst, src); break;
+  case T_SHORT:   __ load_signed_short(dst, src);   break;
+  case T_INT:     __ movl  (dst, src);              break;
+  case T_ADDRESS: __ movptr(dst, src);              break;
+  case T_FLOAT:
+    assert(dst == noreg, "only to ftos");
+    __ load_float(src);
+    break;
+  case T_DOUBLE:
+    assert(dst == noreg, "only to dtos");
+    __ load_double(src);
+    break;
+  case T_LONG:
+    assert(dst == noreg, "only to ltos");
+#ifdef _LP64
+    __ movq(rax, src);
+#else
+    if (atomic) {
+      __ fild_d(src);               // Must load atomically
+      __ subptr(rsp,2*wordSize);    // Make space for store
+      __ fistp_d(Address(rsp,0));
+      __ pop(rax);
+      __ pop(rdx);
+    } else {
+      __ movl(rax, src);
+      __ movl(rdx, src.plus_disp(wordSize));
+    }
+#endif
+    break;
   default: Unimplemented();
   }
 }
@@ -67,6 +99,7 @@
   bool on_heap = (decorators & IN_HEAP) != 0;
   bool on_root = (decorators & IN_ROOT) != 0;
   bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
+  bool atomic = (decorators & MO_RELAXED) != 0;

   switch (type) {
   case T_OBJECT:
@@ -106,6 +139,50 @@
     }
     break;
   }
+  case T_BOOLEAN:
+    __ andl(val, 0x1);  // boolean is true if LSB is 1
+    __ movb(dst, val);
+    break;
+  case T_BYTE:
+    __ movb(dst, val);
+    break;
+  case T_SHORT:
+    __ movw(dst, val);
+    break;
+  case T_CHAR:
+    __ movw(dst, val);
+    break;
+  case T_INT:
+    __ movl(dst, val);
+    break;
+  case T_LONG:
+    assert(val == noreg, "only tos");
+#ifdef _LP64
+    __ movq(dst, rax);
+#else
+    if (atomic) {
+      __ push(rdx);
+      __ push(rax);                 // Must update atomically with FIST
+      __ fild_d(Address(rsp,0));    // So load into FPU register
+      __ fistp_d(dst);              // and put into memory atomically
+      __ addptr(rsp, 2*wordSize);
+    } else {
+      __ movptr(dst, rax);
+      __ movptr(dst.plus_disp(wordSize), rdx);
+    }
+#endif
+    break;
+  case T_FLOAT:
+    assert(val == noreg, "only tos");
+    __ store_float(dst);
+    break;
+  case T_DOUBLE:
+    assert(val == noreg, "only tos");
+    __ store_double(dst);
+    break;
+  case T_ADDRESS:
+    __ movptr(dst, val);
+    break;
   default: Unimplemented();
   }
 }
diff --git a/src/hotspot/cpu/x86/methodHandles_x86.cpp b/src/hotspot/cpu/x86/methodHandles_x86.cpp
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp
@@ -175,7 +175,9 @@
     __ verify_oop(method_temp);
     __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), temp2);
     __ verify_oop(method_temp);
-    __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
+    __ access_load_at(T_ADDRESS, IN_HEAP, method_temp,
+                      Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())),
+                      noreg, noreg);

     if (VerifyMethodHandles && !for_compiler_entry) {
       // make sure recv is already on stack
@@ -390,7 +392,7 @@
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ load_heap_oop(rbx_method, member_vmtarget);
-     __ movptr(rbx_method, vmtarget_method);
+     __ access_load_at(T_ADDRESS, IN_HEAP, rbx_method, vmtarget_method, noreg, noreg);
      break;

    case vmIntrinsics::_linkToStatic:
@@ -398,7 +400,7 @@
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ load_heap_oop(rbx_method, member_vmtarget);
-     __ movptr(rbx_method, vmtarget_method);
+     __ access_load_at(T_ADDRESS, IN_HEAP, rbx_method, vmtarget_method, noreg, noreg);
      break;

    case vmIntrinsics::_linkToVirtual:
@@ -412,7 +414,7 @@

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
-     __ movptr(temp2_index, member_vmindex);
+     __ access_load_at(T_ADDRESS, IN_HEAP, temp2_index, member_vmindex, noreg, noreg);

      if (VerifyMethodHandles) {
        Label L_index_ok;
@@ -446,7 +448,7 @@
      __ verify_klass_ptr(temp3_intf);

      Register rbx_index = rbx_method;
-     __ movptr(rbx_index, member_vmindex);
+     __ access_load_at(T_ADDRESS, IN_HEAP, rbx_index, member_vmindex, noreg, noreg);
      if (VerifyMethodHandles) {
        Label L;
        __ cmpl(rbx_index, 0);
diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp
@@ -770,9 +770,10 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ movl(rax, Address(rdx, rax,
-                       Address::times_4,
-                       arrayOopDesc::base_offset_in_bytes(T_INT)));
+  __ access_load_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_4,
+                            arrayOopDesc::base_offset_in_bytes(T_INT)),
+                    noreg, noreg);
 }

 void TemplateTable::laload() {
@@ -782,8 +783,10 @@
   index_check(rdx, rax); // kills rbx
   NOT_LP64(__ mov(rbx, rax));
   // rbx,: index
-  __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
-  NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
+  __ access_load_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, noreg /* ltos */,
+                    Address(rdx, rbx, Address::times_8,
+                            arrayOopDesc::base_offset_in_bytes(T_LONG)),
+                    noreg, noreg);
 }


@@ -793,9 +796,11 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_float(Address(rdx, rax,
-                        Address::times_4,
-                        arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+  __ access_load_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, noreg /* ftos */,
+                    Address(rdx, rax,
+                            Address::times_4,
+                            arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
+                    noreg, noreg);
 }

 void TemplateTable::daload() {
@@ -803,9 +808,11 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_double(Address(rdx, rax,
-                         Address::times_8,
-                         arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+  __ access_load_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, noreg /* dtos */,
+                    Address(rdx, rax,
+                            Address::times_8,
+                            arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
+                    noreg, noreg);
 }

 void TemplateTable::aaload() {
@@ -826,7 +833,9 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_signed_byte(rax, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
+  __ access_load_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
+                    noreg, noreg);
 }

 void TemplateTable::caload() {
@@ -834,7 +843,9 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_unsigned_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+  __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
+                    noreg, noreg);
 }

 // iload followed by caload frequent pair
@@ -847,10 +858,9 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_unsigned_short(rax,
-                         Address(rdx, rax,
-                                 Address::times_2,
-                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+  __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
+                    noreg, noreg);
 }


@@ -859,7 +869,9 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_signed_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
+  __ access_load_at(T_SHORT, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
+                    noreg, noreg);
 }

 void TemplateTable::iload(int n) {
@@ -1051,10 +1063,10 @@
   // rbx: index
   // rdx: array
   index_check(rdx, rbx); // prefer index in rbx
-  __ movl(Address(rdx, rbx,
-                  Address::times_4,
-                  arrayOopDesc::base_offset_in_bytes(T_INT)),
-          rax);
+  __ access_store_at(T_INT, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_4,
+                             arrayOopDesc::base_offset_in_bytes(T_INT)),
+                     rax, noreg, noreg);
 }

 void TemplateTable::lastore() {
@@ -1065,8 +1077,10 @@
   // rdx: high(value)
   index_check(rcx, rbx);  // prefer index in rbx,
   // rbx,: index
-  __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
-  NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
+  __ access_store_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rcx, rbx, Address::times_8,
+                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
+                     noreg /* ltos */, noreg, noreg);
 }


@@ -1077,7 +1091,10 @@
   // rbx: index
   // rdx: array
   index_check(rdx, rbx); // prefer index in rbx
-  __ store_float(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+  __ access_store_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_4,
+                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
+                     noreg /* ftos */, noreg, noreg);
 }

 void TemplateTable::dastore() {
@@ -1087,7 +1104,10 @@
   // rbx: index
   // rdx: array
   index_check(rdx, rbx); // prefer index in rbx
-  __ store_double(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+  __ access_store_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_8,
+                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
+                     noreg /* dtos */, noreg, noreg);
 }

 void TemplateTable::aastore() {
@@ -1160,10 +1180,10 @@
   __ jccb(Assembler::zero, L_skip);
   __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
   __ bind(L_skip);
-  __ movb(Address(rdx, rbx,
-                  Address::times_1,
-                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
-          rax);
+  __ access_store_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_1,
+                             arrayOopDesc::base_offset_in_bytes(T_BYTE)),
+                     rax, noreg, noreg);
 }

 void TemplateTable::castore() {
@@ -1173,10 +1193,10 @@
   // rbx: index
   // rdx: array
   index_check(rdx, rbx); // prefer index in rbx
-  __ movw(Address(rdx, rbx,
-                  Address::times_2,
-                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
-          rax);
+  __ access_store_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_2,
+                             arrayOopDesc::base_offset_in_bytes(T_CHAR)),
+                     rax, noreg, noreg);
 }


@@ -2852,7 +2872,6 @@
   if (!is_static) pop_and_check_object(obj);

   const Address field(obj, off, Address::times_1, 0*wordSize);
-  NOT_LP64(const Address hi(obj, off, Address::times_1, 1*wordSize));

   Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;

@@ -2864,7 +2883,7 @@
   __ jcc(Assembler::notZero, notByte);

   // btos
-  __ load_signed_byte(rax, field);
+  __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
   __ push(btos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2877,7 +2896,7 @@
   __ jcc(Assembler::notEqual, notBool);

   // ztos (same code as btos)
-  __ load_signed_byte(rax, field);
+  __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
   __ push(ztos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2901,7 +2920,7 @@
   __ cmpl(flags, itos);
   __ jcc(Assembler::notEqual, notInt);
   // itos
-  __ movl(rax, field);
+  __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
   __ push(itos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2913,7 +2932,7 @@
   __ cmpl(flags, ctos);
   __ jcc(Assembler::notEqual, notChar);
   // ctos
-  __ load_unsigned_short(rax, field);
+  __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
   __ push(ctos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2925,7 +2944,7 @@
   __ cmpl(flags, stos);
   __ jcc(Assembler::notEqual, notShort);
   // stos
-  __ load_signed_short(rax, field);
+  __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
   __ push(stos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2937,19 +2956,9 @@
   __ cmpl(flags, ltos);
   __ jcc(Assembler::notEqual, notLong);
   // ltos
-
-#ifndef _LP64
-  // Generate code as if volatile.  There just aren't enough registers to
-  // save that information and this code is faster than the test.
-  __ fild_d(field);             // Must load atomically
-  __ subptr(rsp,2*wordSize);    // Make space for store
-  __ fistp_d(Address(rsp,0));
-  __ pop(rax);
-  __ pop(rdx);
-#else
-  __ movq(rax, field);
-#endif
-
+  // Generate code as if volatile (x86_32).  There just aren't enough registers to
+  // save that information and this code is faster than the test.
+  __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
   __ push(ltos);
   // Rewrite bytecode to be faster
   LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
@@ -2960,7 +2969,7 @@
   __ jcc(Assembler::notEqual, notFloat);

   // ftos
-  __ load_float(field);
+  __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
   __ push(ftos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2974,7 +2983,7 @@
   __ jcc(Assembler::notEqual, notDouble);
 #endif
   // dtos
-  __ load_double(field);
+  __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
   __ push(dtos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -3133,7 +3142,7 @@
   {
     __ pop(btos);
     if (!is_static) pop_and_check_object(obj);
-    __ movb(field, rax);
+    __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
     }
@@ -3148,8 +3157,7 @@
   {
     __ pop(ztos);
     if (!is_static) pop_and_check_object(obj);
-    __ andl(rax, 0x1);
-    __ movb(field, rax);
+    __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
     }
@@ -3180,7 +3188,7 @@
   {
     __ pop(itos);
     if (!is_static) pop_and_check_object(obj);
-    __ movl(field, rax);
+    __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
     }
@@ -3195,7 +3203,7 @@
   {
     __ pop(ctos);
     if (!is_static) pop_and_check_object(obj);
-    __ movw(field, rax);
+    __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
     }
@@ -3210,7 +3218,7 @@
   {
     __ pop(stos);
     if (!is_static) pop_and_check_object(obj);
-    __ movw(field, rax);
+    __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
     }
@@ -3226,7 +3234,7 @@
   {
     __ pop(ltos);
     if (!is_static) pop_and_check_object(obj);
-    __ movq(field, rax);
+    __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos*/, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
     }
@@ -3242,11 +3250,7 @@
     if (!is_static) pop_and_check_object(obj);

     // Replace with real volatile test
-    __ push(rdx);
-    __ push(rax);                 // Must update atomically with FIST
-    __ fild_d(Address(rsp,0));    // So load into FPU register
-    __ fistp_d(field);            // and put into memory atomically
-    __ addptr(rsp, 2*wordSize);
+    __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos */, noreg, noreg);
     // volatile_barrier();
     volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                                  Assembler::StoreStore));
@@ -3257,8 +3261,7 @@

     __ pop(ltos);  // overwrites rdx
     if (!is_static) pop_and_check_object(obj);
-    __ movptr(hi, rdx);
-    __ movptr(field, rax);
+    __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
     // Don't rewrite to _fast_lputfield for potential volatile case.
     __ jmp(notVolatile);
   }
@@ -3272,7 +3275,7 @@
   {
     __ pop(ftos);
     if (!is_static) pop_and_check_object(obj);
-    __ store_float(field);
+    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
     }
@@ -3289,7 +3292,7 @@
   {
     __ pop(dtos);
     if (!is_static) pop_and_check_object(obj);
-    __ store_double(field);
+    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
     }
@@ -3422,30 +3425,31 @@
     break;
   case Bytecodes::_fast_lputfield:
 #ifdef _LP64
-    __ movq(field, rax);
+    __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
 #else
   __ stop("should not be rewritten");
 #endif
     break;
   case Bytecodes::_fast_iputfield:
-    __ movl(field, rax);
+    __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
     break;
   case Bytecodes::_fast_zputfield:
-    __ andl(rax, 0x1);  // boolean is true if LSB is 1
-    // fall through to bputfield
+    __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
+    break;
   case Bytecodes::_fast_bputfield:
-    __ movb(field, rax);
+    __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
     break;
   case Bytecodes::_fast_sputfield:
-    // fall through
+    __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
+    break;
   case Bytecodes::_fast_cputfield:
-    __ movw(field, rax);
+    __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
     break;
   case Bytecodes::_fast_fputfield:
-    __ store_float(field);
+    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos*/, noreg, noreg);
     break;
   case Bytecodes::_fast_dputfield:
-    __ store_double(field);
+    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos*/, noreg, noreg);
     break;
   default:
     ShouldNotReachHere();
@@ -3512,28 +3516,28 @@
     break;
   case Bytecodes::_fast_lgetfield:
 #ifdef _LP64
-    __ movq(rax, field);
+    __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
 #else
   __ stop("should not be rewritten");
 #endif
     break;
   case Bytecodes::_fast_igetfield:
-    __ movl(rax, field);
+    __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
     break;
   case Bytecodes::_fast_bgetfield:
-    __ movsbl(rax, field);
+    __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
     break;
   case Bytecodes::_fast_sgetfield:
-    __ load_signed_short(rax, field);
+    __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
    break;
  case Bytecodes::_fast_cgetfield:
-    __ load_unsigned_short(rax, field);
+    __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
     break;
   case Bytecodes::_fast_fgetfield:
-    __ load_float(field);
+    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
     break;
   case Bytecodes::_fast_dgetfield:
-    __ load_double(field);
+    __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
     break;
   default:
     ShouldNotReachHere();
@@ -3566,14 +3570,14 @@
   const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
   switch (state) {
   case itos:
-    __ movl(rax, field);
+    __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
     break;
   case atos:
     do_oop_load(_masm, field, rax);
     __ verify_oop(rax);
     break;
   case ftos:
-    __ load_float(field);
+    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
     break;
   default:
     ShouldNotReachHere();
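With the interpreter call sites above routed through
access_load_at/access_store_at, a collector can interpose on primitive
heap accesses by overriding the virtual load_at/store_at hooks of its
BarrierSetAssembler. A minimal sketch of what such an override could look
like (the class name and the barrier emission are hypothetical; only the
load_at signature follows the code touched by this patch):

  class MyBarrierSetAssembler : public BarrierSetAssembler {
  public:
    virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                         Register dst, Address src, Register tmp1, Register tmp_thread) {
      // Hypothetical: emit a GC-specific barrier before the access,
      // e.g. resolving or read-barriering the base object of src.
      // emit_read_barrier(masm, src);
      // Then delegate to the default implementation for the plain access.
      BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    }
  };

Since the default BarrierSetAssembler::load_at/store_at now cover every
BasicType, collectors that need nothing special for primitives simply
inherit the plain moves emitted above.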