151
152 // Call Interpreter::remove_activation_early_entry() to get the address of the
153 // same-named entrypoint in the generated interpreter code.
154 ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
155 ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_tos_offset()));
156 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), rscratch1);
157 br(r0);
158 bind(L);
159 }
160 }
161
// Load the unsigned 2-byte operand found at rbcp + bcp_offset into reg,
// converting it from the bytecode stream's big-endian layout to native order.
162 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
163     Register reg,
164     int bcp_offset) {
165   assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
166   ldrh(reg, Address(rbcp, bcp_offset));  // zero-extending halfword load of the raw operand
167   rev16(reg, reg);                       // byte-swap the halfword (bytecodes are big-endian)
168 }
169
// Materialize the address of the interpreter dispatch table in rdispatch
// using an adrp (page base) + lea (page offset) pair.
170 void InterpreterMacroAssembler::get_dispatch() {
171   uint64_t offset;  // adrp returns the low 12 bits here; must be a guaranteed 64-bit type ('unsigned long' is 32-bit on LLP64 targets)
172   adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);  // rdispatch = 4KB page containing the table
173   lea(rdispatch, Address(rdispatch, offset));  // add the in-page offset to form the full address
174 }
175
176 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
177 int bcp_offset,
178 size_t index_size) {
179 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
180 if (index_size == sizeof(u2)) {
181 load_unsigned_short(index, Address(rbcp, bcp_offset));
182 } else if (index_size == sizeof(u4)) {
183 // assert(EnableInvokeDynamic, "giant index used only for JSR 292");
184 ldrw(index, Address(rbcp, bcp_offset));
185 // Check if the secondary index definition is still ~x, otherwise
186 // we have to change the following assembler code to calculate the
187 // plain index.
188 assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
189 eonw(index, index, zr); // convert to plain index
190 } else if (index_size == sizeof(u1)) {
191 load_unsigned_byte(index, Address(rbcp, bcp_offset));
748 rscratch2, rscratch1, tmp);
749 b(done);
750 bind(fail);
751 } else {
752 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
753 }
754
755 // Test if the oopMark is an obvious stack pointer, i.e.,
756 // 1) (mark & 7) == 0, and
757 // 2) rsp <= mark < mark + os::pagesize()
758 //
759 // These 3 tests can be done by evaluating the following
760 // expression: ((mark - rsp) & (7 - os::vm_page_size())),
761 // assuming both stack pointer and pagesize have their
762 // least significant 3 bits clear.
763 // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
764 // NOTE2: aarch64 does not like to subtract sp from rn so take a
765 // copy
766 mov(rscratch1, sp);
767 sub(swap_reg, swap_reg, rscratch1);
768 ands(swap_reg, swap_reg, (unsigned long)(7 - os::vm_page_size()));
769
770 // Save the test result, for recursive case, the result is zero
771 str(swap_reg, Address(lock_reg, mark_offset));
772
773 if (PrintBiasedLockingStatistics) {
774 br(Assembler::NE, slow_case);
775 atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
776 rscratch2, rscratch1, tmp);
777 }
778 br(Assembler::EQ, done);
779
780 bind(slow_case);
781
782 // Call the runtime routine for slow case
783 call_VM(noreg,
784 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
785 lock_reg);
786
787 bind(done);
788 }
|
151
152 // Call Interpreter::remove_activation_early_entry() to get the address of the
153 // same-named entrypoint in the generated interpreter code.
154 ldr(rscratch1, Address(rthread, JavaThread::jvmti_thread_state_offset()));
155 ldrw(rscratch1, Address(rscratch1, JvmtiThreadState::earlyret_tos_offset()));
156 call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), rscratch1);
157 br(r0);
158 bind(L);
159 }
160 }
161
// Load the unsigned 2-byte operand found at rbcp + bcp_offset into reg,
// converting it from the bytecode stream's big-endian layout to native order.
162 void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(
163     Register reg,
164     int bcp_offset) {
165   assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
166   ldrh(reg, Address(rbcp, bcp_offset));  // zero-extending halfword load of the raw operand
167   rev16(reg, reg);                       // byte-swap the halfword (bytecodes are big-endian)
168 }
169
// Materialize the address of the interpreter dispatch table in rdispatch
// using an adrp (page base) + lea (page offset) pair.
170 void InterpreterMacroAssembler::get_dispatch() {
171   uint64_t offset;  // receives the low 12 bits of the target address from adrp
172   adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);  // rdispatch = 4KB page containing the table
173   lea(rdispatch, Address(rdispatch, offset));  // add the in-page offset to form the full address
174 }
175
176 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
177 int bcp_offset,
178 size_t index_size) {
179 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
180 if (index_size == sizeof(u2)) {
181 load_unsigned_short(index, Address(rbcp, bcp_offset));
182 } else if (index_size == sizeof(u4)) {
183 // assert(EnableInvokeDynamic, "giant index used only for JSR 292");
184 ldrw(index, Address(rbcp, bcp_offset));
185 // Check if the secondary index definition is still ~x, otherwise
186 // we have to change the following assembler code to calculate the
187 // plain index.
188 assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
189 eonw(index, index, zr); // convert to plain index
190 } else if (index_size == sizeof(u1)) {
191 load_unsigned_byte(index, Address(rbcp, bcp_offset));
748 rscratch2, rscratch1, tmp);
749 b(done);
750 bind(fail);
751 } else {
752 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
753 }
754
755 // Test if the oopMark is an obvious stack pointer, i.e.,
756 // 1) (mark & 7) == 0, and
757 // 2) rsp <= mark < mark + os::pagesize()
758 //
759 // These 3 tests can be done by evaluating the following
760 // expression: ((mark - rsp) & (7 - os::vm_page_size())),
761 // assuming both stack pointer and pagesize have their
762 // least significant 3 bits clear.
763 // NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
764 // NOTE2: aarch64 does not like to subtract sp from rn so take a
765 // copy
766 mov(rscratch1, sp);
767 sub(swap_reg, swap_reg, rscratch1);
768 ands(swap_reg, swap_reg, (uint64_t)(7 - os::vm_page_size()));
769
770 // Save the test result, for recursive case, the result is zero
771 str(swap_reg, Address(lock_reg, mark_offset));
772
773 if (PrintBiasedLockingStatistics) {
774 br(Assembler::NE, slow_case);
775 atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
776 rscratch2, rscratch1, tmp);
777 }
778 br(Assembler::EQ, done);
779
780 bind(slow_case);
781
782 // Call the runtime routine for slow case
783 call_VM(noreg,
784 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
785 lock_reg);
786
787 bind(done);
788 }
|