778 prepare_to_jump_from_interpreted();
779
780 if (JvmtiExport::can_post_interpreter_events()) {
781 Label run_compiled_code;
782 // JVMTI events, such as single-stepping, are implemented partly by avoiding running
783 // compiled code in threads for which the event is enabled. Check here for
784 // interp_only_mode if these events CAN be enabled.
785 // interp_only is an int, on little endian it is sufficient to test the byte only
786 // Is a cmpl faster?
787 LP64_ONLY(temp = r15_thread;)
788 NOT_LP64(get_thread(temp);)
789 cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
790 jccb(Assembler::zero, run_compiled_code);
791 jmp(Address(method, Method::interpreter_entry_offset()));
792 bind(run_compiled_code);
793 }
794
795 jmp(Address(method, Method::from_interpreted_offset()));
796 }
797
798 // The following two routines provide a hook so that an implementation
799 // can schedule the dispatch in two parts. x86 does not do this.
800 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) { // first half of the two-part dispatch hook; a no-op on x86 (state/step intentionally unused)
801 // Nothing x86 specific to be done here
802 }
803
804 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) { // second half of the two-part dispatch hook; x86 does the entire dispatch here
805 dispatch_next(state, step); // dispatch_next (defined elsewhere in this file) performs the actual next-bytecode dispatch for the given TOS state
806 }
807
808 void InterpreterMacroAssembler::dispatch_base(TosState state,
809 address* table,
810 bool verifyoop,
811 bool generate_poll) {
812 verify_FPU(1, state);
813 if (VerifyActivationFrameSize) {
814 Label L;
815 mov(rcx, rbp);
816 subptr(rcx, rsp);
817 int32_t min_frame_size =
|
778 prepare_to_jump_from_interpreted();
779
780 if (JvmtiExport::can_post_interpreter_events()) {
781 Label run_compiled_code;
782 // JVMTI events, such as single-stepping, are implemented partly by avoiding running
783 // compiled code in threads for which the event is enabled. Check here for
784 // interp_only_mode if these events CAN be enabled.
785 // interp_only is an int, on little endian it is sufficient to test the byte only
786 // Is a cmpl faster?
787 LP64_ONLY(temp = r15_thread;)
788 NOT_LP64(get_thread(temp);)
789 cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
790 jccb(Assembler::zero, run_compiled_code);
791 jmp(Address(method, Method::interpreter_entry_offset()));
792 bind(run_compiled_code);
793 }
794
795 jmp(Address(method, Method::from_interpreted_offset()));
796 }
797
// NOTE(review): the block below (resolve_special / get_entry / call_Java_final /
// jump_Java_final) is commented-out experimental code, apparently kept for
// reference — confirm with the author before reviving or deleting it.
798 // void InterpreterMacroAssembler::resolve_special(Register rmethod, LinkInfo link_info) {
799 // CallInfo callinfo;
800 // LinkResolver::resolve_special_call(callinfo, Handle(), link_info, Thread::current());
801 // methodHandle methodh = callinfo.selected_method();
802 // assert(methodh.not_null(), "should have thrown exception");
803 // Method* method = methodh();
804 // tty->print_cr("call_Java_final method: %p name: %s", method, method->name()->as_C_string());
805 // // tty->print_cr("call_Java_final const: %p, params: %d locals %d", method->constMethod(), method->constMethod()->_size_of_parameters, method->constMethod()->_max_locals);
806
807 // movptr(rmethod, AddressLiteral((address)method, RelocationHolder::none).addr());
808 // }
809
810 // void InterpreterMacroAssembler::get_entry(Register entry, Register method) {
811 // // TODO: see InterpreterMacroAssembler::jump_from_interpreted for special cases
812 // Label done;
813 // // if (JvmtiExport::can_post_interpreter_events()) {
814 // // Register temp;
815 // // Label run_compiled_code;
816 // // // JVMTI events, such as single-stepping, are implemented partly by avoiding running
817 // // // compiled code in threads for which the event is enabled. Check here for
818 // // // interp_only_mode if these events CAN be enabled.
819 // // // interp_only is an int, on little endian it is sufficient to test the byte only
820 // // // Is a cmpl faster?
821 // // LP64_ONLY(temp = r15_thread;)
822 // // NOT_LP64(get_thread(temp);)
823 // // cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0);
824 // // jccb(Assembler::zero, run_compiled_code);
825 // // movptr(entry, Address(method, Method::interpreter_entry_offset()));
826 // // bind(run_compiled_code);
827 // // }
828 // movptr(entry, Address(method, Method::from_interpreted_offset()));
829 // bind(done);
830 // }
831
832 // // loads method into rbx
833 // void InterpreterMacroAssembler::get_entry(Register entry, LinkInfo link_info) {
834 // resolve_special(rbx, link_info);
835 // get_entry(entry, rbx);
836 // }
837
838 // void InterpreterMacroAssembler::call_Java_final(LinkInfo link_info) {
839 // Register rentry = rax;
840 // get_entry(rentry, link_info);
841
842 // // profile_call(rax); // ?? rax
843 // // profile_arguments_type(rax, rbx, rbcp, false);
844 // call(rentry);
845 // }
846
847 // void InterpreterMacroAssembler::jump_Java_final(LinkInfo link_info) {
848 // Register rentry = rax;
849 // get_entry(rentry, link_info);
850
851 // // profile_call(rax); // ?? rax
852 // // profile_arguments_type(rax, rbx, rbcp, false);
853 // jmp(rentry);
854 // }
855
856 // The following two routines provide a hook so that an implementation
857 // can schedule the dispatch in two parts. x86 does not do this.
858 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int step) { // first half of the two-part dispatch hook; a no-op on x86 (state/step intentionally unused)
859 // Nothing x86 specific to be done here
860 }
861
862 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int step) { // second half of the two-part dispatch hook; x86 does the entire dispatch here
863 dispatch_next(state, step); // dispatch_next (defined elsewhere in this file) performs the actual next-bytecode dispatch for the given TOS state
864 }
865
866 void InterpreterMacroAssembler::dispatch_base(TosState state,
867 address* table,
868 bool verifyoop,
869 bool generate_poll) {
870 verify_FPU(1, state);
871 if (VerifyActivationFrameSize) {
872 Label L;
873 mov(rcx, rbp);
874 subptr(rcx, rsp);
875 int32_t min_frame_size =
|