4245 // should we ever get anything other than this case?
4246 __ stlxr(rscratch1, src_reg, base);
4247 }
4248 } else {
4249 Register index_reg = as_Register(index);
4250 if (disp == 0) {
4251 __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4252 __ stlxr(rscratch1, src_reg, rscratch2);
4253 } else {
4254 __ lea(rscratch2, Address(base, disp));
4255 __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4256 __ stlxr(rscratch1, src_reg, rscratch2);
4257 }
4258 }
4259 __ cmpw(rscratch1, zr);
4260 %}
4261
4262 enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{ // 64-bit CAS of *mem from oldval to newval; success is consumed via flags (see aarch64_enc_cset_eq)
4263 MacroAssembler _masm(&cbuf);
4264 guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); // address must be a bare base register: no index, no displacement
4265 __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4266 &Assembler::ldxr, &MacroAssembler::cmp, &Assembler::stlxr); // plain ldxr (no acquire on the load); stlxr store-release publishes the update
4267 %}
4268
4269 enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ // 32-bit variant of aarch64_enc_cmpxchg (word-sized registers/instructions)
4270 MacroAssembler _masm(&cbuf);
4271 guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); // address must be a bare base register: no index, no displacement
4272 __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4273 &Assembler::ldxrw, &MacroAssembler::cmpw, &Assembler::stlxrw); // ldxrw/cmpw/stlxrw: same CAS loop, word width; no acquire on the load
4274 %}
4275
4276
4277 // The only difference between aarch64_enc_cmpxchg and
4278 // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4279 // CompareAndSwap sequence to serve as a barrier on acquiring a
4280 // lock.
4281 enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{ // 64-bit CAS with acquire semantics on the load (lock-acquire barrier)
4282 MacroAssembler _masm(&cbuf);
4283 guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); // address must be a bare base register: no index, no displacement
4284 __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4285 &Assembler::ldaxr, &MacroAssembler::cmp, &Assembler::stlxr); // ldaxr load-acquire is the only difference from aarch64_enc_cmpxchg
4286 %}
4287
4288 enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ // 32-bit variant of aarch64_enc_cmpxchg_acq
4289 MacroAssembler _masm(&cbuf);
4290 guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); // address must be a bare base register: no index, no displacement
4291 __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
4292 &Assembler::ldaxrw, &MacroAssembler::cmpw, &Assembler::stlxrw); // ldaxrw load-acquire; word-sized compare and store-release
4293 %}
4294
4295
4296 // auxiliary used for CompareAndSwapX to set result register
4297 enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4298 MacroAssembler _masm(&cbuf);
4299 // Materialize the CAS outcome from the flags: res := (EQ) ? 1 : 0.
4300 __ cset(as_Register($res$$reg), Assembler::EQ);
4301 %}
4302
4303 // prefetch encodings
4304
4305 enc_class aarch64_enc_prefetchw(memory mem) %{
4306 MacroAssembler _masm(&cbuf);
4307 Register base = as_Register($mem$$base);
4308 int index = $mem$$index;
4309 int scale = $mem$$scale;
4310 int disp = $mem$$disp;
4311 if (index == -1) {
4312 __ prfm(Address(base, disp), PSTL1KEEP);
4790 if ((EmitSync & 0x02) == 0) {
4791 // we can use AArch64's bit test and branch here but
4792 // markOopDesc does not define a bit index, just the bit value,
4793 // so assert in case the bit pos changes
4794 # define __monitor_value_log2 1
4795 assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
4796 __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
4797 # undef __monitor_value_log2
4798 }
4799
4800 // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
4801 __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);
4802
4803 // Load Compare Value application register.
4804
4805 // Initialize the box. (Must happen before we update the object mark!)
4806 __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4807
4808 // Compare object markOop with mark and if equal exchange scratch1
4809 // with object markOop.
4810 {
4811 Label retry_load;
4812 __ bind(retry_load);
4813 __ ldaxr(tmp, oop);
4814 __ cmp(tmp, disp_hdr);
4815 __ br(Assembler::NE, cas_failed);
4816 // use stlxr to ensure update is immediately visible
4817 __ stlxr(tmp, box, oop);
4818 __ cbzw(tmp, cont);
4819 __ b(retry_load);
4820 }
4821
4822 // Formerly:
4823 // __ cmpxchgptr(/*oldv=*/disp_hdr,
4824 // /*newv=*/box,
4825 // /*addr=*/oop,
4826 // /*tmp=*/tmp,
4827 // cont,
4828 // /*fail*/NULL);
4829
4830 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4840 __ mov(rscratch1, sp);
4841 __ sub(disp_hdr, disp_hdr, rscratch1);
4842 __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
4843 // If condition is true we are cont and hence we can store 0 as the
4844 // displaced header in the box, which indicates that it is a recursive lock.
4845 __ ands(tmp/*==0?*/, disp_hdr, tmp);
4846 __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4847
4848 // Handle existing monitor.
4849 if ((EmitSync & 0x02) == 0) {
4850 __ b(cont);
4851
4852 __ bind(object_has_monitor);
4853 // The object's monitor m is unlocked iff m->owner == NULL,
4854 // otherwise m->owner may contain a thread or a stack address.
4855 //
4856 // Try to CAS m->owner from NULL to current thread.
4857 __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
4858 __ mov(disp_hdr, zr);
4859
4860 {
4861 Label retry_load, fail;
4862 __ bind(retry_load);
4863 __ ldaxr(rscratch1, tmp);
4864 __ cmp(disp_hdr, rscratch1);
4865 __ br(Assembler::NE, fail);
4866 // use stlxr to ensure update is immediately visible
4867 __ stlxr(rscratch1, rthread, tmp);
4868 __ cbnzw(rscratch1, retry_load);
4869 __ bind(fail);
4870 }
4871
4872 // Label next;
4873 // __ cmpxchgptr(/*oldv=*/disp_hdr,
4874 // /*newv=*/rthread,
4875 // /*addr=*/tmp,
4876 // /*tmp=*/rscratch1,
4877 // /*succeed*/next,
4878 // /*fail*/NULL);
4879 // __ bind(next);
4880
4929 }
4930
4931 // Find the lock address and load the displaced header from the stack.
4932 __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4933
4934 // If the displaced header is 0, we have a recursive unlock.
4935 __ cmp(disp_hdr, zr);
4936 __ br(Assembler::EQ, cont);
4937
4938
4939 // Handle existing monitor.
4940 if ((EmitSync & 0x02) == 0) {
4941 __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
4942 __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
4943 }
4944
4945 // Check if it is still a light weight lock; this is true if we
4946 // see the stack address of the basicLock in the markOop of the
4947 // object.
4948
4949 {
4950 Label retry_load;
4951 __ bind(retry_load);
4952 __ ldxr(tmp, oop);
4953 __ cmp(box, tmp);
4954 __ br(Assembler::NE, cas_failed);
4955 // use stlxr to ensure update is immediately visible
4956 __ stlxr(tmp, disp_hdr, oop);
4957 __ cbzw(tmp, cont);
4958 __ b(retry_load);
4959 }
4960
4961 // __ cmpxchgptr(/*compare_value=*/box,
4962 // /*exchange_value=*/disp_hdr,
4963 // /*where=*/oop,
4964 // /*result=*/tmp,
4965 // cont,
4966 // /*cas_failed*/NULL);
4967 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4968
4969 __ bind(cas_failed);
|
4245 // should we ever get anything other than this case?
4246 __ stlxr(rscratch1, src_reg, base);
4247 }
4248 } else {
4249 Register index_reg = as_Register(index);
4250 if (disp == 0) {
4251 __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
4252 __ stlxr(rscratch1, src_reg, rscratch2);
4253 } else {
4254 __ lea(rscratch2, Address(base, disp));
4255 __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
4256 __ stlxr(rscratch1, src_reg, rscratch2);
4257 }
4258 }
4259 __ cmpw(rscratch1, zr);
4260 %}
4261
4262 enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{ // 64-bit CAS of *mem from oldval to newval; success is consumed via flags (see aarch64_enc_cset_eq)
4263 MacroAssembler _masm(&cbuf);
4264 guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); // address must be a bare base register: no index, no displacement
4265 __ cmpxchg(Assembler::xword,
4266 $oldval$$Register, $newval$$Register, $mem$$base$$Register,
4267 /*acquire*/ false, /*release*/ true); // relaxed load, store-release; 64-bit (xword) operand size
4268 %}
4269
4270 enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ // 32-bit variant of aarch64_enc_cmpxchg (word operand size)
4271 MacroAssembler _masm(&cbuf);
4272 guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); // address must be a bare base register: no index, no displacement
4273 __ cmpxchg(Assembler::word,
4274 $oldval$$Register, $newval$$Register, $mem$$base$$Register,
4275 /*acquire*/ false, /*release*/ true); // relaxed load, store-release; 32-bit (word) operand size
4276 %}
4277
4278
4279 // The only difference between aarch64_enc_cmpxchg and
4280 // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
4281 // CompareAndSwap sequence to serve as a barrier on acquiring a
4282 // lock.
4283 enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{ // 64-bit CAS with acquire semantics on the load (lock-acquire barrier)
4284 MacroAssembler _masm(&cbuf);
4285 guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); // address must be a bare base register: no index, no displacement
4286 __ cmpxchg(Assembler::xword,
4287 $oldval$$Register, $newval$$Register, $mem$$base$$Register,
4288 /*acquire*/ true, /*release*/ true); // acquire=true is the only difference from aarch64_enc_cmpxchg
4289 %}
4290
4291 enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ // 32-bit variant of aarch64_enc_cmpxchg_acq
4292 MacroAssembler _masm(&cbuf);
4293 guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); // address must be a bare base register: no index, no displacement
4294 __ cmpxchg(Assembler::word,
4295 $oldval$$Register, $newval$$Register, $mem$$base$$Register,
4296 /*acquire*/ true, /*release*/ true); // acquire+release, 32-bit (word) operand size
4297 %}
4298
4299
4300 // auxiliary used for CompareAndSwapX to set result register
4301 enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
4302 MacroAssembler _masm(&cbuf);
4303 // Turn the flags produced by the CAS into a boolean: res := (EQ) ? 1 : 0.
4304 __ cset(as_Register($res$$reg), Assembler::EQ);
4305 %}
4306
4307 // prefetch encodings
4308
4309 enc_class aarch64_enc_prefetchw(memory mem) %{
4310 MacroAssembler _masm(&cbuf);
4311 Register base = as_Register($mem$$base);
4312 int index = $mem$$index;
4313 int scale = $mem$$scale;
4314 int disp = $mem$$disp;
4315 if (index == -1) {
4316 __ prfm(Address(base, disp), PSTL1KEEP);
4794 if ((EmitSync & 0x02) == 0) {
4795 // we can use AArch64's bit test and branch here but
4796 // markOopDesc does not define a bit index, just the bit value,
4797 // so assert in case the bit pos changes
4798 # define __monitor_value_log2 1
4799 assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position");
4800 __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor);
4801 # undef __monitor_value_log2
4802 }
4803
4804 // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
4805 __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value);
4806
4807 // Load Compare Value application register.
4808
4809 // Initialize the box. (Must happen before we update the object mark!)
4810 __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4811
4812 // Compare object markOop with mark and if equal exchange scratch1
4813 // with object markOop.
4814 if (UseLSE) {
4815 __ mov(tmp, disp_hdr);
4816 __ casal(Assembler::xword, tmp, box, oop);
4817 __ cmp(tmp, disp_hdr);
4818 __ br(Assembler::EQ, cont);
4819 } else {
4820 Label retry_load;
4821 __ bind(retry_load);
4822 __ ldaxr(tmp, oop);
4823 __ cmp(tmp, disp_hdr);
4824 __ br(Assembler::NE, cas_failed);
4825 // use stlxr to ensure update is immediately visible
4826 __ stlxr(tmp, box, oop);
4827 __ cbzw(tmp, cont);
4828 __ b(retry_load);
4829 }
4830
4831 // Formerly:
4832 // __ cmpxchgptr(/*oldv=*/disp_hdr,
4833 // /*newv=*/box,
4834 // /*addr=*/oop,
4835 // /*tmp=*/tmp,
4836 // cont,
4837 // /*fail*/NULL);
4838
4839 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4849 __ mov(rscratch1, sp);
4850 __ sub(disp_hdr, disp_hdr, rscratch1);
4851 __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
4852 // If condition is true we are cont and hence we can store 0 as the
4853 // displaced header in the box, which indicates that it is a recursive lock.
4854 __ ands(tmp/*==0?*/, disp_hdr, tmp);
4855 __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4856
4857 // Handle existing monitor.
4858 if ((EmitSync & 0x02) == 0) {
4859 __ b(cont);
4860
4861 __ bind(object_has_monitor);
4862 // The object's monitor m is unlocked iff m->owner == NULL,
4863 // otherwise m->owner may contain a thread or a stack address.
4864 //
4865 // Try to CAS m->owner from NULL to current thread.
4866 __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value));
4867 __ mov(disp_hdr, zr);
4868
4869 if (UseLSE) {
4870 __ mov(rscratch1, disp_hdr);
4871 __ casal(Assembler::xword, rscratch1, rthread, tmp);
4872 __ cmp(rscratch1, disp_hdr);
4873 } else {
4874 Label retry_load, fail;
4875 __ bind(retry_load);
4876 __ ldaxr(rscratch1, tmp);
4877 __ cmp(disp_hdr, rscratch1);
4878 __ br(Assembler::NE, fail);
4879 // use stlxr to ensure update is immediately visible
4880 __ stlxr(rscratch1, rthread, tmp);
4881 __ cbnzw(rscratch1, retry_load);
4882 __ bind(fail);
4883 }
4884
4885 // Label next;
4886 // __ cmpxchgptr(/*oldv=*/disp_hdr,
4887 // /*newv=*/rthread,
4888 // /*addr=*/tmp,
4889 // /*tmp=*/rscratch1,
4890 // /*succeed*/next,
4891 // /*fail*/NULL);
4892 // __ bind(next);
4893
4942 }
4943
4944 // Find the lock address and load the displaced header from the stack.
4945 __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));
4946
4947 // If the displaced header is 0, we have a recursive unlock.
4948 __ cmp(disp_hdr, zr);
4949 __ br(Assembler::EQ, cont);
4950
4951
4952 // Handle existing monitor.
4953 if ((EmitSync & 0x02) == 0) {
4954 __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
4955 __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor);
4956 }
4957
4958 // Check if it is still a light weight lock; this is true if we
4959 // see the stack address of the basicLock in the markOop of the
4960 // object.
4961
4962 if (UseLSE) {
4963 __ mov(tmp, box);
4964 __ casl(Assembler::xword, tmp, disp_hdr, oop);
4965 __ cmp(tmp, box);
4966 } else {
4967 Label retry_load;
4968 __ bind(retry_load);
4969 __ ldxr(tmp, oop);
4970 __ cmp(box, tmp);
4971 __ br(Assembler::NE, cas_failed);
4972 // use stlxr to ensure update is immediately visible
4973 __ stlxr(tmp, disp_hdr, oop);
4974 __ cbzw(tmp, cont);
4975 __ b(retry_load);
4976 }
4977
4978 // __ cmpxchgptr(/*compare_value=*/box,
4979 // /*exchange_value=*/disp_hdr,
4980 // /*where=*/oop,
4981 // /*result=*/tmp,
4982 // cont,
4983 // /*cas_failed*/NULL);
4984 assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
4985
4986 __ bind(cas_failed);
|