# HG changeset patch # User rkennke # Date 1476970840 -7200 # Thu Oct 20 15:40:40 2016 +0200 # Node ID 367fa88b1ef9111181e1d2d5ef1e5221a6e10109 # Parent e44bdff3b9d4d70b17472fdb2cd90e6009842b35 [mq]: verification.patch diff --git a/src/cpu/aarch64/vm/aarch64.ad b/src/cpu/aarch64/vm/aarch64.ad --- a/src/cpu/aarch64/vm/aarch64.ad +++ b/src/cpu/aarch64/vm/aarch64.ad @@ -3746,12 +3746,13 @@ } } -#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN) \ +#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN, STORE) \ MacroAssembler _masm(&cbuf); \ { \ guarantee(INDEX == -1, "mode not permitted for volatile"); \ guarantee(DISP == 0, "mode not permitted for volatile"); \ guarantee(SCALE == 0, "mode not permitted for volatile"); \ + if (STORE) { __ shenandoah_store_addr_check(as_Register(BASE)); } \ __ INSN(REG, as_Register(BASE)); \ } @@ -3763,7 +3764,7 @@ // Used for all non-volatile memory accesses. The use of // $mem->opcode() to discover whether this pattern uses sign-extended // offsets is something of a kludge. 
- static void loadStore(MacroAssembler masm, mem_insn insn, + static void loadStore(MacroAssembler masm, mem_insn insn, bool store, Register reg, int opcode, Register base, int index, int size, int disp) { @@ -3783,6 +3784,7 @@ scale = Address::lsl(size); } + if (store) masm.shenandoah_store_addr_check(base); if (index == -1) { (masm.*insn)(reg, Address(base, disp)); } else { @@ -3791,7 +3793,7 @@ } } - static void loadStore(MacroAssembler masm, mem_float_insn insn, + static void loadStore(MacroAssembler masm, mem_float_insn insn, bool store, FloatRegister reg, int opcode, Register base, int index, int size, int disp) { @@ -3806,7 +3808,8 @@ scale = Address::lsl(size); } - if (index == -1) { + if (store) masm.shenandoah_store_addr_check(base); + if (index == -1) { (masm.*insn)(reg, Address(base, disp)); } else { assert(disp == 0, "unsupported address mode: disp = %d", disp); @@ -3814,10 +3817,11 @@ } } - static void loadStore(MacroAssembler masm, mem_vector_insn insn, + static void loadStore(MacroAssembler masm, mem_vector_insn insn, bool store, FloatRegister reg, MacroAssembler::SIMD_RegVariant T, int opcode, Register base, int index, int size, int disp) { + if (store) masm.shenandoah_store_addr_check(base); if (index == -1) { (masm.*insn)(reg, T, Address(base, disp)); } else { @@ -3875,146 +3879,146 @@ enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, 
$mem$$disp); %} enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, false, dst_reg, $mem->opcode(), as_Register($mem$$base), 
$mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{ FloatRegister dst_reg = as_FloatRegister($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, false, dst_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{ FloatRegister dst_reg = as_FloatRegister($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, false, dst_reg, 
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{ FloatRegister dst_reg = as_FloatRegister($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S, + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, false, dst_reg, MacroAssembler::S, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{ FloatRegister dst_reg = as_FloatRegister($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D, + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, false, dst_reg, MacroAssembler::D, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{ FloatRegister dst_reg = as_FloatRegister($dst$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q, + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, false, dst_reg, MacroAssembler::Q, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strb(iRegI src, memory mem) %{ Register src_reg = as_Register($src$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, true, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strb0(memory mem) %{ MacroAssembler _masm(&cbuf); - loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(), + loadStore(_masm, &MacroAssembler::strb, true, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strb0_ordered(memory mem) %{ MacroAssembler _masm(&cbuf); __ membar(Assembler::StoreStore); - loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(), + loadStore(_masm, 
&MacroAssembler::strb, true, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strh(iRegI src, memory mem) %{ Register src_reg = as_Register($src$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, true, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strh0(memory mem) %{ MacroAssembler _masm(&cbuf); - loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(), + loadStore(_masm, &MacroAssembler::strh, true, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strw(iRegI src, memory mem) %{ Register src_reg = as_Register($src$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, true, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strw0(memory mem) %{ MacroAssembler _masm(&cbuf); - loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(), + loadStore(_masm, &MacroAssembler::strw, true, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} @@ -4028,43 +4032,43 @@ __ mov(rscratch2, sp); src_reg = rscratch2; } - loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, true, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_str0(memory mem) %{ MacroAssembler _masm(&cbuf); - loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(), + loadStore(_masm, &MacroAssembler::str, true, zr, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strs(vRegF src, memory mem) %{ FloatRegister src_reg = 
as_FloatRegister($src$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, true, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strd(vRegD src, memory mem) %{ FloatRegister src_reg = as_FloatRegister($src$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, true, src_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strvS(vecD src, memory mem) %{ FloatRegister src_reg = as_FloatRegister($src$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S, + loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, true, src_reg, MacroAssembler::S, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strvD(vecD src, memory mem) %{ FloatRegister src_reg = as_FloatRegister($src$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D, + loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, true, src_reg, MacroAssembler::D, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} enc_class aarch64_enc_strvQ(vecX src, memory mem) %{ FloatRegister src_reg = as_FloatRegister($src$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q, + loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, true, src_reg, MacroAssembler::Q, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); %} @@ -4074,92 +4078,92 @@ enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{ MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, stlrb); + rscratch1, stlrb, true); %} enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{ 
MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, stlrh); + rscratch1, stlrh, true); %} enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{ MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, stlrw); + rscratch1, stlrw, true); %} enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarb); + rscratch1, ldarb, false); __ sxtbw(dst_reg, dst_reg); %} enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarb); + rscratch1, ldarb, false); __ sxtb(dst_reg, dst_reg); %} enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{ MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarb); + rscratch1, ldarb, false); %} enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{ MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarb); + rscratch1, ldarb, false); %} enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarh); + rscratch1, ldarh, false); __ sxthw(dst_reg, dst_reg); %} enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{ Register dst_reg = as_Register($dst$$reg); MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarh); + rscratch1, ldarh, false); __ sxth(dst_reg, dst_reg); %} enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{ MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarh); + rscratch1, ldarh, false); %} enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{ 
MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarh); + rscratch1, ldarh, false); %} enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{ MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarw); + rscratch1, ldarw, false); %} enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{ MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarw); + rscratch1, ldarw, false); %} enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{ MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldar); + rscratch1, ldar, false); %} enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{ MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldarw); + rscratch1, ldarw, false); __ fmovs(as_FloatRegister($dst$$reg), rscratch1); %} enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{ MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, ldar); + rscratch1, ldar, false); __ fmovd(as_FloatRegister($dst$$reg), rscratch1); %} @@ -4174,7 +4178,7 @@ src_reg = rscratch2; } MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, stlr); + rscratch1, stlr, true); %} enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{ @@ -4184,7 +4188,7 @@ __ fmovs(rscratch2, src_reg); } MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, stlrw); + rscratch1, stlrw, true); %} enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{ @@ -4194,7 +4198,7 @@ __ fmovd(rscratch2, src_reg); } MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, - rscratch1, stlr); + rscratch1, stlr, true); %} // synchronized read/update encodings @@ -4261,6 +4265,7 @@ enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{ MacroAssembler _masm(&cbuf); guarantee($mem$$index 
== -1 && $mem$$disp == 0, "impossible encoding"); + __ shenandoah_store_addr_check($mem$$base$$Register); __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg); @@ -4269,6 +4274,7 @@ enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ MacroAssembler _masm(&cbuf); guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ shenandoah_store_addr_check($mem$$base$$Register); __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, Assembler::word, /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg); @@ -4279,6 +4285,7 @@ MacroAssembler _masm(&cbuf); guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); Register tmp = $tmp$$Register; + __ shenandoah_store_check($mem$$base$$Register, $newval$$Register); __ mov(tmp, $oldval$$Register); // Must not clobber oldval. __ cmpxchg_oop_shenandoah($res$$base$$Register, $mem$$base$$Register, tmp, $newval$$Register, false, /*acquire*/ true, /*release*/ true); @@ -4291,6 +4298,7 @@ enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{ MacroAssembler _masm(&cbuf); guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ shenandoah_store_addr_check($mem$$base$$Register); __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, Assembler::xword, /*acquire*/ true, /*release*/ true, /*weak*/ false, noreg); @@ -4299,6 +4307,7 @@ enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ MacroAssembler _masm(&cbuf); guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); + __ shenandoah_store_addr_check($mem$$base$$Register); __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, Assembler::word, /*acquire*/ true, /*release*/ true, /*weak*/ false, noreg); @@ -4309,6 +4318,7 @@ MacroAssembler _masm(&cbuf); guarantee($mem$$index == -1 && 
$mem$$disp == 0, "impossible encoding"); Register tmp = $tmp$$Register; + __ shenandoah_store_check($mem$$base$$Register, $newval$$Register); __ mov(tmp, $oldval$$Register); // Must not clobber oldval. __ cmpxchg_oop_shenandoah($res$$base$$Register, $mem$$base$$Register, tmp, $newval$$Register, false, /*acquire*/ true, /*release*/ true); @@ -4745,6 +4755,8 @@ assert_different_registers(oop, box, tmp, disp_hdr); + __ shenandoah_store_addr_check(oop); + // Load markOop from object into displaced_header. __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes())); @@ -4903,6 +4915,8 @@ assert_different_registers(oop, box, tmp, disp_hdr); + __ shenandoah_store_addr_check(oop); + // Always do locking in runtime. if (EmitSync & 0x01) { __ cmp(oop, zr); // Oop can't be 0 here => always false. @@ -8483,8 +8497,7 @@ } } - if (reg != rscratch2) - __ shenandoah_store_check(reg, adr); + __ shenandoah_store_check(adr, reg); __ str(reg, adr); %} @@ -9052,7 +9065,7 @@ "mov $dst, $tmp\t# vector (1D)" %} ins_encode %{ FloatRegister tmp_reg = as_FloatRegister($tmp$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, false, tmp_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); @@ -9095,7 +9108,7 @@ "mov $dst, $tmp\t# vector (1D)" %} ins_encode %{ FloatRegister tmp_reg = as_FloatRegister($tmp$$reg); - loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(), + loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, false, tmp_reg, $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); diff --git a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp 
b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp --- a/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp +++ b/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp @@ -818,11 +818,15 @@ if (type == T_ARRAY || type == T_OBJECT) { __ verify_oop(src->as_register()); + __ shenandoah_store_check(as_Address(to_addr), src->as_register()); + if (UseCompressedOops && !wide) { __ encode_heap_oop(compressed_src, src->as_register()); } else { compressed_src = src->as_register(); } + } else { + __ shenandoah_store_addr_check(to_addr->base()->as_pointer_register()); } int null_check_here = code_offset(); @@ -842,7 +846,6 @@ if (UseCompressedOops && !wide) { __ strw(compressed_src, as_Address(to_addr, rscratch2)); } else { - __ shenandoah_store_check(compressed_src, as_Address(to_addr)); __ str(compressed_src, as_Address(to_addr)); } break; diff --git a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp --- a/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp +++ b/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp @@ -69,7 +69,7 @@ verify_oop(obj); - shenandoah_store_check(obj); + shenandoah_store_addr_check(obj); // save object being locked into the BasicObjectLock str(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); @@ -134,8 +134,6 @@ // load object ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); - shenandoah_store_check(obj); - biased_locking_exit(obj, hdr, done); } @@ -149,7 +147,8 @@ ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); } verify_oop(obj); - shenandoah_store_check(obj); + + shenandoah_store_addr_check(obj); // test if object header is pointing to the displaced header, and if so, restore // the displaced header in the object - if the object header is not pointing to diff --git a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp --- a/src/cpu/aarch64/vm/interp_masm_aarch64.cpp +++ b/src/cpu/aarch64/vm/interp_masm_aarch64.cpp @@ -523,6 
+523,7 @@ lea(c_rarg1, monitor); // address of first monitor ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); + shenandoah_store_addr_check(r0); // Invariant cbnz(r0, unlock); pop(state); @@ -600,6 +601,7 @@ bind(loop); // check if current entry is used ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); + shenandoah_store_addr_check(rscratch1); // Invariant cbnz(rscratch1, exception); add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry @@ -663,7 +665,7 @@ // Load object pointer into obj_reg %c_rarg3 ldr(obj_reg, Address(lock_reg, obj_offset)); - shenandoah_store_check(obj_reg); + shenandoah_store_addr_check(obj_reg); if (UseBiasedLocking) { biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case); @@ -764,7 +766,7 @@ // Load oop into obj_reg(%c_rarg3) ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); - shenandoah_store_check(obj_reg); + shenandoah_store_addr_check(obj_reg); // Free entry str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp --- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp +++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp @@ -415,6 +415,8 @@ Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes()); Address saved_mark_addr(lock_reg, 0); + shenandoah_store_addr_check(obj_reg); + // Biased locking // See whether the lock is currently biased toward our thread and // whether the epoch is still valid @@ -565,6 +567,7 @@ // a higher level. Second, if the bias was revoked while we held the // lock, the object could not be rebiased toward another thread, so // the bias bit would be clear. 
+ shenandoah_store_addr_check(obj_reg); // Access mark word ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place); cmp(temp_reg, markOopDesc::biased_lock_pattern); @@ -1975,7 +1978,7 @@ void MacroAssembler::stop(const char* msg, Label *l) { address ip = pc(); pusha(); - mov(c_rarg0, (address)msg); + lea(c_rarg0, ExternalAddress((address) msg)); if (! l) { adr(c_rarg1, (address)ip); } else { @@ -3617,7 +3620,6 @@ } void MacroAssembler::store_heap_oop(Address dst, Register src) { - shenandoah_store_check(src, dst); if (UseCompressedOops) { assert(!dst.uses(src), "not enough registers"); encode_heap_oop(src); @@ -5197,127 +5199,155 @@ // written to, and that fromspace pointers are not written into // objects during concurrent marking. These methods check for that. -const bool ShenandoahStoreCheck = false; - -void MacroAssembler::in_heap_check(Register r, Label &nope) { +void MacroAssembler::in_heap_check(Register r, Register tmp, Label &nope) { ShenandoahHeap *h = (ShenandoahHeap *)Universe::heap(); HeapWord* first_region_bottom = h->first_region_bottom(); HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * h->max_regions(); - mov(rscratch1, (uintptr_t)first_region_bottom); - cmp(r, rscratch1); + mov(tmp, (uintptr_t)first_region_bottom); + cmp(r, tmp); br(Assembler::LO, nope); - mov(rscratch1, (uintptr_t)last_region_end); - cmp(r, rscratch1); + mov(tmp, (uintptr_t)last_region_end); + cmp(r, tmp); br(Assembler::HS, nope); } -void MacroAssembler::shenandoah_store_check(Register r, Address dest) { - if (! ShenandoahStoreCheck) - return; - - assert_different_registers(rscratch1, rscratch2, r); - assert(! dest.uses(rscratch1), "invalid register"); - assert(! dest.uses(rscratch2), "invalid register"); - - assert(! 
InlineObjectCopy, "ShenandoahStoreCheck is incompatible with InlineObjectCopy"); +void MacroAssembler::shenandoah_cset_check(Register obj, Register tmp1, Register tmp2, Label& done) { + + // Test that oop is not in to-space. + lsr(tmp1, obj, ShenandoahHeapRegion::RegionSizeShift); + assert(ShenandoahHeap::in_cset_fast_test_addr() != 0, "sanity"); + mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr()); + ldrb(tmp2, Address(tmp2, tmp1)); + tbz(tmp2, 0, done); + + // Check for cancelled GC. + assert(ShenandoahHeap::cancelled_concgc_addr() != 0, "sanity"); + mov(tmp2, ShenandoahHeap::cancelled_concgc_addr()); + ldrb(tmp2, Address(tmp2)); + cbnz(tmp2, done); +} + +void MacroAssembler::_shenandoah_store_check(Address addr, Register value, const char* msg, const char* file, int line) { + _shenandoah_store_check(addr.base(), value, msg, file, line); +} + +void MacroAssembler::_shenandoah_store_check(Register addr, Register value, const char* msg, const char* file, int line) { + + if (! UseShenandoahGC || ! ShenandoahStoreCheck) return; + if (addr == r31_sp || addr == sp) return; // Stack-based target + + Register raddr = r8; + Register rval = r9; + Register tmp1 = r10; + Register tmp2 = r11; + + RegSet to_save = RegSet::of(raddr, rval, tmp1, tmp2); + + // Push tmp regs and flags. + push(to_save, sp); + get_cflags(tmp1); + push(RegSet::of(tmp1), sp); + + orr(rval, zr, value); + // mov(rval, value); + mov(raddr, addr); Label done; - cbz(r, done); - - mov(rscratch2, ShenandoahHeap::concurrent_mark_in_progress_addr()); - Assembler::ldrw(rscratch2, Address(rscratch2)); - cbzw(rscratch2, done); - - in_heap_check(r, done); - - // Check for object in collection set. 
- lsr(rscratch1, r, ShenandoahHeapRegion::RegionSizeShift); - mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr()); - ldrb(rscratch2, Address(rscratch2, rscratch1)); - tbz(rscratch2, 0, done); - - // Check for dest in heap - lea(rscratch2, dest); - in_heap_check(rscratch2, done); - - lsr(rscratch1, rscratch2, ShenandoahHeapRegion::RegionSizeShift); - mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr()); - ldrb(rscratch2, Address(rscratch2, rscratch1)); - tbz(rscratch2, 0, done); - - ldr(rscratch2, Address(r, BrooksPointer::byte_offset())); - - stop("Shenandoah: store of oop in collection set during marking!", &done); - should_not_reach_here(); + + // If not in-heap target, skip check. + in_heap_check(raddr, tmp1, done); + + // Test that target oop is not in to-space. + shenandoah_cset_check(raddr, tmp1, tmp2, done); + + // Do value-check only when concurrent mark is in progress. + mov(tmp1, ShenandoahHeap::concurrent_mark_in_progress_addr()); + ldrw(tmp1, Address(tmp1)); + cbzw(tmp1, done); + + // Null-check value. + cbz(rval, done); + + // Test that value oop is not in to-space. + shenandoah_cset_check(rval, tmp1, tmp2, done); + + // Failure. + // Pop tmp regs and flags. + pop(RegSet::of(tmp1), sp); + set_cflags(tmp1); + pop(to_save, sp); + const char* b = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("shenandoah_store_check: %s in file: %s line: %i", msg, file, line); + b = code_string(ss.as_string()); + } + // hlt(0); + + stop(b); bind(done); + // Pop tmp regs and flags. + pop(RegSet::of(tmp1), sp); + set_cflags(tmp1); + pop(to_save, sp); } -void MacroAssembler::shenandoah_store_check(Address dest) { - if (! ShenandoahStoreCheck) - return; - - assert(! dest.uses(rscratch1), "invalid register"); - assert(! 
dest.uses(rscratch2), "invalid register"); - - Label done, yes; - - ldr(rscratch2, Address(rthread, in_bytes(JavaThread::evacuation_in_progress_offset()))); - cbnzw(rscratch2, yes); - - mov(rscratch2, ShenandoahHeap::concurrent_mark_in_progress_addr()); - Assembler::ldrw(rscratch2, Address(rscratch2)); - cbzw(rscratch2, done); - - bind(yes); - - // Check for dest in heap - lea(rscratch2, dest); - cbz(rscratch2, done); - in_heap_check(rscratch2, done); - - lsr(rscratch1, rscratch2, ShenandoahHeapRegion::RegionSizeShift); - mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr()); - ldrb(rscratch2, Address(rscratch2, rscratch1)); - tbz(rscratch2, 0, done); - - stop("Shenandoah: store in collection set during marking/evacuation!", &done); - should_not_reach_here(); +void MacroAssembler::_shenandoah_store_addr_check(Address addr, const char* msg, const char* file, int line) { + _shenandoah_store_addr_check(addr.base(), msg, file, line); +} + +void MacroAssembler::_shenandoah_store_addr_check(Register dst, const char* msg, const char* file, int line) { + + if (! UseShenandoahGC || ! ShenandoahStoreCheck) return; + if (dst == r31_sp || dst == sp) return; // Stack-based target + + Register addr = r8; + Register tmp1 = r9; + Register tmp2 = r10; + + Label done; + RegSet to_save = RegSet::of(addr, tmp1, tmp2); + + // Push tmp regs and flags. + push(to_save, sp); + get_cflags(tmp1); + push(RegSet::of(tmp1), sp); + + orr(addr, zr, dst); + // mov(addr, dst); + + // Check null. + cbz(addr, done); + + in_heap_check(addr, tmp1, done); + + shenandoah_cset_check(addr, tmp1, tmp2, done); + + // Fail. + // Pop tmp regs and flags. + pop(RegSet::of(tmp1), sp); + set_cflags(tmp1); + pop(to_save, sp); + const char* b = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("shenandoah_store_check: %s in file: %s line: %i", msg, file, line); + b = code_string(ss.as_string()); + } + // hlt(0); + stop(b); + // should_not_reach_here(); bind(done); + // Pop tmp regs and flags. 
+ pop(RegSet::of(tmp1), sp); + set_cflags(tmp1); + pop(to_save, sp); + } -void MacroAssembler::shenandoah_store_check(Register dest) { - if (! ShenandoahStoreCheck) - return; - - assert_different_registers(rscratch1, rscratch2, dest); - - Label done, yes; - - ldr(rscratch2, Address(rthread, in_bytes(JavaThread::evacuation_in_progress_offset()))); - cbnzw(rscratch2, yes); - - mov(rscratch2, ShenandoahHeap::concurrent_mark_in_progress_addr()); - Assembler::ldrw(rscratch2, Address(rscratch2)); - cbzw(rscratch2, done); - - bind(yes); - - // Check for dest in heap - cbz(dest, done); - in_heap_check(dest, done); - - lsr(rscratch1, dest, ShenandoahHeapRegion::RegionSizeShift); - mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr()); - ldrb(rscratch2, Address(rscratch2, rscratch1)); - tbz(rscratch2, 0, done); - - stop("Shenandoah: store in collection set during marking/evacuation!", &done); - should_not_reach_here(); - - bind(done); -} - diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp --- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp +++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp @@ -541,6 +541,17 @@ msr(0b011, 0b0100, 0b0100, 0b001, zr); } + // Macro instructions for accessing and updating the condition flags + inline void get_cflags(Register reg) + { + mrs(0b011, 0b0100, 0b0010, 0b000, reg); + } + + inline void set_cflags(Register reg) + { + msr(0b011, 0b0100, 0b0010, 0b000, reg); + } + // DCZID_EL0: op1 == 011 // CRn == 0000 // CRm == 0000 @@ -1239,10 +1250,19 @@ Register tmp3, Register tmp4, int int_cnt1, Register result, int ae); - void in_heap_check(Register r, Label &nope); - void shenandoah_store_check(Register r, Address addr); - void shenandoah_store_check(Address addr); - void shenandoah_store_check(Register addr); + void in_heap_check(Register r, Register tmp, Label &nope); + +private: + void shenandoah_cset_check(Register obj, Register tmp1, Register tmp2, Label& done); + +public: + void 
_shenandoah_store_addr_check(Register addr, const char* msg, const char* file, int line); + void _shenandoah_store_addr_check(Address addr, const char* msg, const char* file, int line); +#define shenandoah_store_addr_check(reg) _shenandoah_store_addr_check(reg, "oop not safe for writing", __FILE__, __LINE__) + + void _shenandoah_store_check(Address addr, Register value, const char* msg, const char* file, int line); + void _shenandoah_store_check(Register addr, Register value, const char* msg, const char* file, int line); +#define shenandoah_store_check(addr, value) _shenandoah_store_check(addr, value, "oop not safe for writing", __FILE__, __LINE__) private: void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo, diff --git a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp --- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp +++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp @@ -1826,6 +1826,7 @@ } // Load (object->mark() | 1) into swap_reg r0 + __ shenandoah_store_addr_check(obj_reg); // Access mark word __ ldr(rscratch1, Address(obj_reg, 0)); __ orr(swap_reg, rscratch1, 1); @@ -1990,7 +1991,7 @@ Label done; - __ shenandoah_store_check(obj_reg); + __ shenandoah_store_addr_check(obj_reg); if (UseBiasedLocking) { __ biased_locking_exit(obj_reg, old_hdr, done); diff --git a/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp b/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp --- a/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp +++ b/src/cpu/aarch64/vm/stubRoutines_aarch64.hpp @@ -41,7 +41,7 @@ enum platform_dependent_constants { code_size1 = 19000, // simply increase if too small (assembler will crash if too small) - code_size2 = 22000 // simply increase if too small (assembler will crash if too small) + code_size2 = 23000 // simply increase if too small (assembler will crash if too small) }; class aarch64 { diff --git a/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp 
b/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp --- a/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp +++ b/src/cpu/aarch64/vm/templateInterpreterGenerator_aarch64.cpp @@ -774,6 +774,7 @@ __ mov(rscratch1, esp); __ str(rscratch1, monitor_block_top); // set new monitor block top // store object + __ shenandoah_store_addr_check(r0); __ str(r0, Address(esp, BasicObjectLock::obj_offset_in_bytes())); __ mov(c_rarg1, esp); // object address __ lock_object(c_rarg1); @@ -1440,6 +1441,7 @@ wordSize - sizeof(BasicObjectLock)))); __ ldr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); + __ shenandoah_store_addr_check(t); // Invariant __ cbnz(t, unlock); // Entry already unlocked, need to throw exception diff --git a/src/cpu/aarch64/vm/templateTable_aarch64.cpp b/src/cpu/aarch64/vm/templateTable_aarch64.cpp --- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp +++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp @@ -3744,6 +3744,7 @@ // check if current entry is used // if not used then remember entry in c_rarg1 __ ldr(rscratch1, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes())); + __ shenandoah_store_addr_check(rscratch1); // Invariant oopDesc::bs()->interpreter_read_barrier(_masm, rscratch1); __ cmp(zr, rscratch1); __ csel(c_rarg1, c_rarg3, c_rarg1, Assembler::EQ); @@ -3804,6 +3805,7 @@ __ increment(rbcp); // store object + __ shenandoah_store_addr_check(r0); // Invariant __ str(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); __ lock_object(c_rarg1); @@ -3849,6 +3851,7 @@ __ bind(loop); // check if current entry is for same object __ ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes())); + __ shenandoah_store_addr_check(rscratch1); // Invariant oopDesc::bs()->interpreter_read_barrier(_masm, rscratch1); __ cmp(r0, rscratch1); // if same object then stop searching diff --git a/src/cpu/x86/vm/assembler_x86.cpp b/src/cpu/x86/vm/assembler_x86.cpp --- a/src/cpu/x86/vm/assembler_x86.cpp +++ 
b/src/cpu/x86/vm/assembler_x86.cpp @@ -2029,6 +2029,16 @@ } } +void Assembler::jccb_if_possible(Condition cc, Label& L) { + +#ifdef ASSERT + if (UseShenandoahGC) { + jcc(cc, L); + } else +#endif + jccb(cc, L); +} + void Assembler::jmp(Address adr) { InstructionMark im(this); prefix(adr); @@ -2102,6 +2112,16 @@ } } +void Assembler::jmpb_if_possible(Label& L) { + +#ifdef ASSERT + if (UseShenandoahGC) { + jmp(L); + } else +#endif + jmpb(L); +} + void Assembler::ldmxcsr( Address src) { NOT_LP64(assert(VM_Version::supports_sse(), "")); InstructionMark im(this); diff --git a/src/cpu/x86/vm/assembler_x86.hpp b/src/cpu/x86/vm/assembler_x86.hpp --- a/src/cpu/x86/vm/assembler_x86.hpp +++ b/src/cpu/x86/vm/assembler_x86.hpp @@ -1266,6 +1266,7 @@ // not bound within an 8-bit offset of this instruction, a run-time error // will occur. void jccb(Condition cc, Label& L); + void jccb_if_possible(Condition cc, Label& L); void jmp(Address entry); // pc <- entry @@ -1279,6 +1280,7 @@ // not bound within an 8-bit offset of this instruction, a run-time error // will occur. 
void jmpb(Label& L); + void jmpb_if_possible(Label& L); void ldmxcsr( Address src ); diff --git a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp @@ -933,6 +933,7 @@ if (type == T_ARRAY || type == T_OBJECT) { __ verify_oop(src->as_register()); + __ shenandoah_store_check(as_Address(to_addr), src->as_register()); #ifdef _LP64 if (UseCompressedOops && !wide) { __ movptr(compressed_src, src->as_register()); @@ -942,6 +943,8 @@ } } #endif + } else { + __ shenandoah_store_addr_check(to_addr->base()->as_pointer_register()); } if (patch_code != lir_patch_none) { diff --git a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp --- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp +++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp @@ -46,6 +46,8 @@ verify_oop(obj); + shenandoah_store_addr_check(obj); + // save object being locked into the BasicObjectLock movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj); @@ -123,6 +125,9 @@ movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes())); } verify_oop(obj); + + shenandoah_store_addr_check(obj); + // test if object header is pointing to the displaced header, and if so, restore // the displaced header in the object - if the object header is not pointing to // the displaced header, get the object header instead diff --git a/src/cpu/x86/vm/interp_masm_x86.cpp b/src/cpu/x86/vm/interp_masm_x86.cpp --- a/src/cpu/x86/vm/interp_masm_x86.cpp +++ b/src/cpu/x86/vm/interp_masm_x86.cpp @@ -961,6 +961,7 @@ lea(robj, monitor); // address of first monitor movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes())); + shenandoah_store_addr_check(rax); // Invariant testptr(rax, rax); jcc(Assembler::notZero, unlock); @@ -1043,6 +1044,7 @@ bind(loop); // check if current entry is used + shenandoah_lock_check(rmon); cmpptr(Address(rmon, 
BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL); jcc(Assembler::notEqual, exception); @@ -1140,6 +1142,8 @@ // Load object pointer into obj_reg movptr(obj_reg, Address(lock_reg, obj_offset)); + shenandoah_store_addr_check(obj_reg); + if (UseBiasedLocking) { biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case); } @@ -1156,6 +1160,7 @@ assert(lock_offset == 0, "displaced header must be first word in BasicObjectLock"); + // obj_reg has been checked a few lines up. if (os::is_MP()) lock(); cmpxchgptr(lock_reg, Address(obj_reg, 0)); if (PrintBiasedLockingStatistics) { @@ -1234,6 +1239,7 @@ // Load oop into obj_reg(%c_rarg3) movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); + shenandoah_store_addr_check(obj_reg); // Invariant // Free entry movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); diff --git a/src/cpu/x86/vm/macroAssembler_x86.cpp b/src/cpu/x86/vm/macroAssembler_x86.cpp --- a/src/cpu/x86/vm/macroAssembler_x86.cpp +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp @@ -45,6 +45,8 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1SATBCardTableModRefBS.hpp" #include "gc/g1/heapRegion.hpp" +#include "gc/shenandoah/shenandoahHeap.inline.hpp" +#include "gc/shenandoah/shenandoahHeapRegion.hpp" #endif // INCLUDE_ALL_GCS #include "crc32c.h" #ifdef COMPILER2 @@ -1106,6 +1108,8 @@ Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes()); NOT_LP64( Address saved_mark_addr(lock_reg, 0); ) + shenandoah_store_addr_check(obj_reg); + if (PrintBiasedLockingStatistics && counters == NULL) { counters = BiasedLocking::counters(); } @@ -1169,7 +1173,7 @@ // the prototype header is no longer biased and we have to revoke // the bias on this object. testptr(header_reg, markOopDesc::biased_lock_mask_in_place); - jccb(Assembler::notZero, try_revoke_bias); + jccb_if_possible(Assembler::notZero, try_revoke_bias); // Biasing is still enabled for this data type. 
See whether the // epoch of the current bias is still valid, meaning that the epoch @@ -1181,7 +1185,7 @@ // otherwise the manipulations it performs on the mark word are // illegal. testptr(header_reg, markOopDesc::epoch_mask_in_place); - jccb(Assembler::notZero, try_rebias); + jccb_if_possible(Assembler::notZero, try_rebias); // The epoch of the current bias is still valid but we know nothing // about the owner; it might be set or it might be clear. Try to @@ -1290,6 +1294,7 @@ // a higher level. Second, if the bias was revoked while we held the // lock, the object could not be rebiased toward another thread, so // the bias bit would be clear. + shenandoah_store_addr_check(obj_reg); // Access mark word movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes())); andptr(temp_reg, markOopDesc::biased_lock_mask_in_place); cmpptr(temp_reg, markOopDesc::biased_lock_pattern); @@ -1482,6 +1487,7 @@ movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort bind(L_rtm_retry); } + shenandoah_store_addr_check(objReg); // Access mark word movptr(tmpReg, Address(objReg, 0)); testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased jcc(Assembler::notZero, IsInflated); @@ -1558,6 +1564,7 @@ bind(L_noincrement); } xbegin(L_on_abort); + shenandoah_store_addr_check(objReg); // Access mark word movptr(tmpReg, Address(objReg, 0)); movptr(tmpReg, Address(tmpReg, owner_offset)); testptr(tmpReg, tmpReg); @@ -1704,6 +1711,8 @@ assert_different_registers(objReg, boxReg, tmpReg, scrReg); } + shenandoah_store_addr_check(objReg); // Access mark word + if (counters != NULL) { atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg); } @@ -1753,7 +1762,7 @@ movptr(tmpReg, Address(objReg, 0)); // [FETCH] testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased - jccb(Assembler::notZero, IsInflated); + jccb_if_possible(Assembler::notZero, IsInflated); // Attempt stack-locking ... 
orptr (tmpReg, markOopDesc::unlocked_value); @@ -1830,7 +1839,7 @@ // Test-And-CAS instead of CAS movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); // rax, = m->_owner testptr(tmpReg, tmpReg); // Locked ? - jccb (Assembler::notZero, DONE_LABEL); + jccb_if_possible(Assembler::notZero, DONE_LABEL); } // Appears unlocked - try to swing _owner from null to non-null. @@ -1848,7 +1857,7 @@ movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3 // If we weren't able to swing _owner from NULL to the BasicLock // then take the slow path. - jccb (Assembler::notZero, DONE_LABEL); + jccb_if_possible(Assembler::notZero, DONE_LABEL); // update _owner from BasicLock to thread get_thread (scrReg); // beware: clobbers ICCs movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg); @@ -1878,7 +1887,7 @@ // Can suffer RTS->RTO upgrades on shared or cold $ lines movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); // rax, = m->_owner testptr(tmpReg, tmpReg); // Locked ? - jccb (Assembler::notZero, DONE_LABEL); + jccb_if_possible(Assembler::notZero, DONE_LABEL); } // Appears unlocked - try to swing _owner from null to non-null. @@ -1966,6 +1975,8 @@ assert(boxReg == rax, ""); assert_different_registers(objReg, boxReg, tmpReg); + shenandoah_store_addr_check(objReg); // Access mark word + if (EmitSync & 4) { // Disable - inhibit all inlining. 
Force control through the slow-path cmpptr (rsp, 0); @@ -2007,7 +2018,7 @@ testptr(boxReg, boxReg); jccb(Assembler::notZero, L_regular_inflated_unlock); xend(); - jmpb(DONE_LABEL); + jmpb_if_possible(DONE_LABEL); bind(L_regular_inflated_unlock); } #endif @@ -2051,17 +2062,17 @@ orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); - jccb (Assembler::notZero, DONE_LABEL); + jccb_if_possible(Assembler::notZero, DONE_LABEL); movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD); - jmpb (DONE_LABEL); + jmpb_if_possible(DONE_LABEL); } else { orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); - jccb (Assembler::notZero, DONE_LABEL); + jccb_if_possible(Assembler::notZero, DONE_LABEL); movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); jccb (Assembler::notZero, CheckSucc); movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD); - jmpb (DONE_LABEL); + jmpb_if_possible(DONE_LABEL); } // The Following code fragment (EmitSync & 65536) improves the performance of @@ -2131,11 +2142,11 @@ bind (LGoSlowPath); orptr(boxReg, 1); // set ICC.ZF=0 to indicate failure - jmpb (DONE_LABEL); + jmpb_if_possible(DONE_LABEL); bind (LSuccess); xorptr(boxReg, boxReg); // set ICC.ZF=1 to indicate success - jmpb (DONE_LABEL); + jmpb_if_possible(DONE_LABEL); } bind (Stacked); @@ -2173,12 +2184,12 @@ xorptr(boxReg, boxReg); } orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); - jccb (Assembler::notZero, DONE_LABEL); + jccb_if_possible(Assembler::notZero, DONE_LABEL); movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); jccb (Assembler::notZero, CheckSucc); 
movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD); - jmpb (DONE_LABEL); + jmpb_if_possible(DONE_LABEL); if ((EmitSync & 65536) == 0) { // Try to avoid passing control into the slow_path ... @@ -2235,11 +2246,11 @@ bind (LGoSlowPath); orl (boxReg, 1); // set ICC.ZF=0 to indicate failure - jmpb (DONE_LABEL); + jmpb_if_possible(DONE_LABEL); bind (LSuccess); testl (boxReg, 0); // set ICC.ZF=1 to indicate success - jmpb (DONE_LABEL); + jmpb_if_possible (DONE_LABEL); } bind (Stacked); @@ -6077,6 +6088,166 @@ BLOCK_COMMENT("} verify_oop"); } +void MacroAssembler::in_heap_check(Register raddr, Label& done) { + ShenandoahHeap *h = (ShenandoahHeap *)Universe::heap(); + + HeapWord* first_region_bottom = h->first_region_bottom(); + HeapWord* last_region_end = first_region_bottom + (ShenandoahHeapRegion::RegionSizeBytes / HeapWordSize) * h->max_regions(); + + cmpptr(raddr, (intptr_t) first_region_bottom); + jcc(Assembler::less, done); + cmpptr(raddr, (intptr_t) last_region_end); + jcc(Assembler::greaterEqual, done); + +} + +void MacroAssembler::shenandoah_cset_check(Register raddr, Register tmp1, Register tmp2, Label& done) { + // Test that oop is not in to-space. + movptr(tmp1, raddr); + shrptr(tmp1, ShenandoahHeapRegion::RegionSizeShift); + movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr()); + movbool(tmp2, Address(tmp2, tmp1, Address::times_1)); + testbool(tmp2); + jcc(Assembler::zero, done); + + // Check for cancelled GC. + movptr(tmp2, (intptr_t) ShenandoahHeap::cancelled_concgc_addr()); + movbool(tmp2, Address(tmp2, 0)); + testbool(tmp2); + jcc(Assembler::notZero, done); + +} + +void MacroAssembler::_shenandoah_store_addr_check(Address addr, const char* msg, const char* file, int line) { + _shenandoah_store_addr_check(addr.base(), msg, file, line); +} + +void MacroAssembler::_shenandoah_store_addr_check(Register dst, const char* msg, const char* file, int line) { + if (! UseShenandoahGC || !
ShenandoahStoreCheck) return; + if (dst == rsp) return; // Stack-based target + + Register raddr = r9; + Register tmp1 = r10; + Register tmp2 = r11; + + Label done; + + pushf(); + push(raddr); + push(tmp1); + push(tmp2); + + movptr(raddr, dst); + + // Check null. + testptr(raddr, raddr); + jcc(Assembler::zero, done); + + in_heap_check(raddr, done); + shenandoah_cset_check(raddr, tmp1, tmp2, done); + + // Fail. + pop(tmp2); + pop(tmp1); + pop(raddr); + popf(); + const char* b = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("shenandoah_store_check: %s in file: %s line: %i", msg, file, line); + b = code_string(ss.as_string()); + } + stop(b); + + bind(done); + + pop(tmp2); + pop(tmp1); + pop(raddr); + popf(); +} + +void MacroAssembler::_shenandoah_store_check(Register dst, Register value, const char* msg, const char* file, int line) { + if (! UseShenandoahGC || ! ShenandoahStoreCheck) return; + if (dst == rsp) return; // Stack-based target + + Register raddr = r8; + Register rval = r9; + Register tmp1 = r10; + Register tmp2 = r11; + + // Push tmp regs and flags. + pushf(); + push(raddr); + push(rval); + push(tmp1); + push(tmp2); + + movptr(raddr, dst); + movptr(rval, value); + + Label done; + + // If not in-heap target, skip check. + in_heap_check(raddr, done); + + // Test that target oop is not in to-space. + shenandoah_cset_check(raddr, tmp1, tmp2, done); + + // Do value-check only when concurrent mark is in progress. + movptr(tmp1, (intptr_t) ShenandoahHeap::concurrent_mark_in_progress_addr()); + movbool(tmp1, Address(tmp1, 0)); + testbool(tmp1); + jcc(Assembler::zero, done); + + // Null-check value. + testptr(rval, rval); + jcc(Assembler::zero, done); + + // Test that value oop is not in to-space. + shenandoah_cset_check(rval, tmp1, tmp2, done); + + // Failure. + // Pop tmp regs and flags. 
+ pop(tmp2); + pop(tmp1); + pop(rval); + pop(raddr); + popf(); + const char* b = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("shenandoah_store_check: %s in file: %s line: %i", msg, file, line); + b = code_string(ss.as_string()); + } + stop(b); + + bind(done); + + // Pop tmp regs and flags. + pop(tmp2); + pop(tmp1); + pop(rval); + pop(raddr); + popf(); +} + +void MacroAssembler::_shenandoah_store_check(Address addr, Register value, const char* msg, const char* file, int line) { + _shenandoah_store_check(addr.base(), value, msg, file, line); +} + +void MacroAssembler::_shenandoah_lock_check(Register dst, const char* msg, const char* file, int line) { +#ifdef ASSERT + if (! UseShenandoahGC || ! ShenandoahStoreCheck) return; + + push(r8); + movptr(r8, Address(dst, BasicObjectLock::obj_offset_in_bytes())); + _shenandoah_store_addr_check(r8, msg, file, line); + pop(r8); +#endif +} RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, diff --git a/src/cpu/x86/vm/macroAssembler_x86.hpp b/src/cpu/x86/vm/macroAssembler_x86.hpp --- a/src/cpu/x86/vm/macroAssembler_x86.hpp +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp @@ -585,6 +585,20 @@ void verify_oop(Register reg, const char* s = "broken oop"); void verify_oop_addr(Address addr, const char * s = "broken oop addr"); + void in_heap_check(Register raddr, Label& done); + void shenandoah_cset_check(Register raddr, Register tmp1, Register tmp2, Label& done); + + void _shenandoah_store_addr_check(Register dst, const char* msg, const char* file, int line); + void _shenandoah_store_addr_check(Address dst, const char* msg, const char* file, int line); +#define shenandoah_store_addr_check(reg) _shenandoah_store_addr_check(reg, "oop not safe for writing", __FILE__, __LINE__) + + void _shenandoah_store_check(Address addr, Register value, const char* msg, const char* file, int line); + void _shenandoah_store_check(Register addr, Register value, const char* msg, const char* file, 
int line); +#define shenandoah_store_check(addr, value) _shenandoah_store_check(addr, value, "oop not safe for writing", __FILE__, __LINE__) + + void _shenandoah_lock_check(Register dst, const char* msg, const char* file, int line); +#define shenandoah_lock_check(reg) _shenandoah_lock_check(reg, "lock/oop not safe for writing", __FILE__, __LINE__) + // TODO: verify method and klass metadata (compare against vptr?) void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {} void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){} diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp @@ -2369,6 +2369,7 @@ __ movl(swap_reg, 1); // Load (object->mark() | 1) into swap_reg %rax + __ shenandoah_store_addr_check(obj_reg); // Access mark word __ orptr(swap_reg, Address(obj_reg, 0)); // Save (object->mark() | 1) into BasicLock's displaced header @@ -2533,6 +2534,7 @@ Label done; + __ shenandoah_store_addr_check(obj_reg); if (UseBiasedLocking) { __ biased_locking_exit(obj_reg, old_hdr, done); } diff --git a/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp b/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp --- a/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp +++ b/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp @@ -624,6 +624,7 @@ __ subptr(rsp, entry_size); // add space for a monitor entry __ movptr(monitor_block_top, rsp); // set new monitor block top // store object + __ shenandoah_store_addr_check(rax); __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1); __ movptr(lockreg, rsp); // object address @@ -1269,6 +1270,7 @@ __ lea(regmon, monitor); // address of first monitor __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes())); + __ shenandoah_store_addr_check(t); // Invariant __ 
testptr(t, t); __ jcc(Assembler::notZero, unlock); diff --git a/src/cpu/x86/vm/templateTable_x86.cpp b/src/cpu/x86/vm/templateTable_x86.cpp --- a/src/cpu/x86/vm/templateTable_x86.cpp +++ b/src/cpu/x86/vm/templateTable_x86.cpp @@ -4302,11 +4302,7 @@ // starting with top-most entry __ lea(rbot, monitor_block_bot); // points to word before bottom // of monitor block - if (UseShenandoahGC && ShenandoahVerifyReadsToFromSpace) { - __ jmp(entry); - } else { - __ jmpb(entry); - } + __ jmpb_if_possible(entry); __ bind(loop); // check if current entry is used @@ -4315,6 +4311,7 @@ __ cmovptr(Assembler::equal, rmon, rtop); // cmov => cmovptr // check if current entry is for same object __ movptr(rscratch1, Address(rtop, BasicObjectLock::obj_offset_in_bytes())); + __ shenandoah_store_addr_check(rscratch1); // Invariant oopDesc::bs()->interpreter_read_barrier(_masm, rscratch1); __ cmpptr(rax, rscratch1); // if same object then stop searching @@ -4365,6 +4362,7 @@ __ increment(rbcp); // store object + __ shenandoah_store_addr_check(rax); // Invariant __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax); __ lock_object(rmon); @@ -4406,15 +4404,12 @@ // starting with top-most entry __ lea(rbot, monitor_block_bot); // points to word before bottom // of monitor block - if (UseShenandoahGC && ShenandoahVerifyReadsToFromSpace) { - __ jmp(entry); - } else { - __ jmpb(entry); - } + __ jmpb_if_possible(entry); __ bind(loop); // check if current entry is for same object __ movptr(rscratch1, Address(rtop, BasicObjectLock::obj_offset_in_bytes())); + __ shenandoah_store_addr_check(rscratch1); // Invariant oopDesc::bs()->interpreter_read_barrier(_masm, rscratch1); __ cmpptr(rax, rscratch1); // if same object then stop searching diff --git a/src/share/vm/gc/shenandoah/shenandoahHeap.cpp b/src/share/vm/gc/shenandoah/shenandoahHeap.cpp --- a/src/share/vm/gc/shenandoah/shenandoahHeap.cpp +++ b/src/share/vm/gc/shenandoah/shenandoahHeap.cpp @@ -2156,6 +2156,10 @@ return (address) 
(ShenandoahHeap::heap()->_in_cset_fast_test); } +address ShenandoahHeap::cancelled_concgc_addr() { + return (address) &(ShenandoahHeap::heap()->_cancelled_concgc); +} + void ShenandoahHeap::clear_cset_fast_test() { assert(_in_cset_fast_test_base != NULL, "sanity"); memset(_in_cset_fast_test_base, false, diff --git a/src/share/vm/gc/shenandoah/shenandoahHeap.hpp b/src/share/vm/gc/shenandoah/shenandoahHeap.hpp --- a/src/share/vm/gc/shenandoah/shenandoahHeap.hpp +++ b/src/share/vm/gc/shenandoah/shenandoahHeap.hpp @@ -219,6 +219,7 @@ static ShenandoahHeap* heap_no_check(); static size_t conservative_max_heap_alignment(); static address in_cset_fast_test_addr(); + static address cancelled_concgc_addr(); static void pretouch_storage(char* start, char* end, WorkGang* workers); diff --git a/src/share/vm/gc/shenandoah/shenandoah_globals.hpp b/src/share/vm/gc/shenandoah/shenandoah_globals.hpp --- a/src/share/vm/gc/shenandoah/shenandoah_globals.hpp +++ b/src/share/vm/gc/shenandoah/shenandoah_globals.hpp @@ -117,6 +117,10 @@ diagnostic(bool, ShenandoahReadBarrier, true, \ "Turn on/off read barriers in Shenandoah") \ \ + diagnostic(bool, ShenandoahStoreCheck, false, \ + "Emit additional code that checks objects are written to only" \ + " in to-space") \ + \ develop(bool, ShenandoahDumpHeapBeforeConcurrentMark, false, \ "Dump the ShenanodahHeap Before Each ConcurrentMark") \ \