< prev index next >

src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp

Print this page
rev 50039 : 8200623: Primitive heap access for interpreter BarrierSetAssembler/x86

// --- diff hunk: old lines 32-41 / new lines 32-42 ---
// Emits interpreter-time code to load a value of `type` from memory operand `src`
// into `dst` (or, for float/double/long on this port, into the tos state — see the
// asserts below). Behavior is steered by the access `decorators` bitset.
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool on_heap = (decorators & IN_HEAP) != 0;       // access targets the Java heap
  bool on_root = (decorators & IN_ROOT) != 0;       // access targets a root (non-heap) location
  bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
  // New in this patch: MO_RELAXED requests (at least) relaxed-atomic access;
  // only consulted below for 64-bit loads on 32-bit x86, where plain two-word
  // moves would not be atomic.
  bool atomic = (decorators & MO_RELAXED) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (on_heap) {
// ... old lines 42-55 elided by this diff view (heap-oop load path not shown) ...
// --- diff hunk: old lines 56,74 / new lines 57,107 ---
      assert(on_root, "why else?");
      __ movptr(dst, src);
    }
    break;
  }
  // New in this patch: primitive loads, JDK-8200623. Sub-word types use the
  // explicitly signed/unsigned load forms matching Java semantics
  // (boolean/char zero-extend, byte/short sign-extend).
  case T_BOOLEAN: __ load_unsigned_byte(dst, src); break;
  case T_BYTE: __ load_signed_byte(dst, src); break;
  case T_CHAR: __ load_unsigned_short(dst, src); break;
  case T_SHORT: __ load_signed_short(dst, src); break;
  case T_INT: __ movl (dst, src); break;
  case T_ADDRESS: __ movptr(dst, src); break;
  case T_FLOAT:
    // Result goes to the FPU/XMM tos, not a GP register.
    assert(dst == noreg, "only to ftos");
    __ load_float(src);
    break;
  case T_DOUBLE:
    assert(dst == noreg, "only to dtos");
    __ load_double(src);
    break;
  case T_LONG:
    // Result goes to the fixed ltos register pair (rax on LP64, rdx:rax on 32-bit).
    assert(dst == noreg, "only to ltos");
#ifdef _LP64
    __ movq(rax, src);
#else
    if (atomic) {
      // 32-bit x86 cannot load 64 bits atomically with integer moves; bounce
      // the value through the x87 FPU, which moves 8 bytes in one access.
      __ fild_d(src); // Must load atomically
      __ subptr(rsp,2*wordSize); // Make space for store
      __ fistp_d(Address(rsp,0));
      __ pop(rax);
      __ pop(rdx);
    } else {
      // Non-atomic: two 32-bit halves, low word then high word.
      __ movl(rax, src);
      __ movl(rdx, src.plus_disp(wordSize));
    }
#endif
    break;
  default: Unimplemented();
  }
}

// Emits interpreter-time code to store a value of `type` (in `val`, or on the
// tos state for float/double/long — see asserts) to memory operand `dst`.
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2) {
  bool on_heap = (decorators & IN_HEAP) != 0;
  bool on_root = (decorators & IN_ROOT) != 0;
  bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
  // Same MO_RELAXED handling as load_at: only the 32-bit T_LONG path uses it.
  bool atomic = (decorators & MO_RELAXED) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (on_heap) {
// ... old lines 75-103 elided by this diff view (heap-oop store path not shown) ...
// --- diff hunk: old lines 104,113 / new lines 137,187 ---
      assert(val != noreg, "not supported");
      __ movptr(dst, val);
    }
    break;
  }
  // New in this patch: primitive stores, JDK-8200623.
  case T_BOOLEAN:
    // Normalize to 0/1 before storing a single byte.
    __ andl(val, 0x1); // boolean is true if LSB is 1
    __ movb(dst, val);
    break;
  case T_BYTE:
    __ movb(dst, val);
    break;
  case T_SHORT:
    __ movw(dst, val);
    break;
  case T_CHAR:
    __ movw(dst, val);
    break;
  case T_INT:
    __ movl(dst, val);
    break;
  case T_LONG:
    // Value comes from the ltos register pair, not from `val`.
    assert(val == noreg, "only tos");
#ifdef _LP64
    __ movq(dst, rax);
#else
    if (atomic) {
      // Mirror of the load path: stage rdx:rax on the stack, then let the
      // FPU write all 64 bits to memory in a single access.
      __ push(rdx);
      __ push(rax); // Must update atomically with FIST
      __ fild_d(Address(rsp,0)); // So load into FPU register
      __ fistp_d(dst); // and put into memory atomically
      __ addptr(rsp, 2*wordSize);
    } else {
      // Non-atomic: two 32-bit halves, low word then high word.
      __ movptr(dst, rax);
      __ movptr(dst.plus_disp(wordSize), rdx);
    }
#endif
    break;
  case T_FLOAT:
    assert(val == noreg, "only tos");
    __ store_float(dst);
    break;
  case T_DOUBLE:
    assert(val == noreg, "only tos");
    __ store_double(dst);
    break;
  default: Unimplemented();
  }
}

// NOTE(review): signature truncated by the diff view — remainder of this
// definition is not shown here.
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
< prev index next >