835 //
836 // AArch64:
837 // [ extended SP (*) ]
838 // [ stack top (*) ]
839 //
840 // [ sender_sp ]
841 // [ saved FP ] <--- FP
842 // [ saved LR ]
843 // [ optional padding(*)]
844 // [ local variable m ]
845 // ...
846 // [ local variable 1 ]
847 // [ parameter n ]
848 // ...
849 // [ parameter 1 ] <--- Rlocals
850 //
851 // (*) - AArch64 only
852 //
853
// Generates the interpreter entry for java.lang.ref.Reference.get().
// Returns the generated fast-path entry, or NULL when G1 is not in use
// (caller then falls back to the normal zerolocals entry).
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // Code: _aload_0, _getfield, _areturn
    // parameter size = 1
    //
    // The code that gets generated by this routine is split into 2 parts:
    //    1. The "intrinsified" code for G1 (or any SATB based GC),
    //    2. The slow path - which is an expansion of the regular method entry.
    //
    // Notes:-
    // * In the G1 code we do not check whether we need to block for
    //   a safepoint. If G1 is enabled then we must execute the specialized
    //   code for Reference.get (except when the Reference object is null)
    //   so that we can log the value in the referent field with an SATB
    //   update buffer.
    //   If the code for the getfield template is modified so that the
    //   G1 pre-barrier code is executed when the current method is
    //   Reference.get() then going through the normal method entry
    //   will be fine.
    // * The G1 code can, however, check the receiver object (the instance
    //   of java.lang.Reference) and jump to the slow path if null. If the
    //   Reference object is null then we obviously cannot fetch the referent
    //   and so we don't need to call the G1 pre-barrier. Thus we can use the
    //   regular method entry code to generate the NPE.
    //
    // This code is based on generate_accessor_entry.
    //
    // Rmethod: Method*
    // Rthread: thread
    // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
    // Rparams: parameters

    address entry = __ pc();
    Label slow_path;
    const Register Rthis = R0;
    const Register Rret_addr = Rtmp_save1;
    assert_different_registers(Rthis, Rret_addr, Rsender_sp);

    const int referent_offset = java_lang_ref_Reference::referent_offset;
    // referent_offset is computed during field layout; zero means it was
    // never initialized and the generated load would be garbage.
    guarantee(referent_offset > 0, "referent offset not initialized");

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    // (The regular method entry will raise the NullPointerException.)
    __ ldr(Rthis, Address(Rparams));
    __ cbz(Rthis, slow_path);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    __ load_heap_oop(R0, Address(Rthis, referent_offset));

    // Preserve LR: g1_write_barrier_pre may emit calls that clobber it,
    // and we still need the original return address for the _areturn below.
    __ mov(Rret_addr, LR);

    __ g1_write_barrier_pre(noreg,   // store_addr
                            noreg,   // new_val
                            R0,      // pre_val
                            Rtemp,   // tmp1
                            R1_tmp); // tmp2

    // _areturn
    // Restore the caller's SP and return with the referent left in R0.
    __ mov(SP, Rsender_sp);
    __ ret(Rret_addr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the normal entry point
  return NULL;
}
930
931 // Not supported
932 address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
933 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
934 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
935
936 //
937 // Interpreter stub for calling a native method. (asm interpreter)
938 // This sets up a somewhat different looking stack for calling the native method
939 // than the typical interpreter frame setup.
940 //
941
942 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
943 // determine code generation flags
944 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
945
946 // Incoming registers:
947 //
948 // Rmethod: Method*
|
835 //
836 // AArch64:
837 // [ extended SP (*) ]
838 // [ stack top (*) ]
839 //
840 // [ sender_sp ]
841 // [ saved FP ] <--- FP
842 // [ saved LR ]
843 // [ optional padding(*)]
844 // [ local variable m ]
845 // ...
846 // [ local variable 1 ]
847 // [ parameter n ]
848 // ...
849 // [ parameter 1 ] <--- Rlocals
850 //
851 // (*) - AArch64 only
852 //
853
// Generates the interpreter entry for java.lang.ref.Reference.get().
// Always emits the intrinsic fast path (an ON_WEAK_OOP_REF decorated
// load of the referent); the slow path is only taken for a null receiver.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code performing an ON_WEAK_OOP_REF load,
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * An intrinsic is always executed, where an ON_WEAK_OOP_REF load is performed.
  // * We may jump to the slow path iff the receiver is null. If the
  //   Reference object is null then we no longer perform an ON_WEAK_OOP_REF load;
  //   thus we can use the regular method entry code to generate the NPE.
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
  // Rparams: parameters

  address entry = __ pc();
  Label slow_path;
  const Register Rthis = R0;
  const Register Rret_addr = Rtmp_save1;
  assert_different_registers(Rthis, Rret_addr, Rsender_sp);

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  // referent_offset is computed during field layout; zero means it was
  // never initialized and the generated load would be garbage.
  guarantee(referent_offset > 0, "referent offset not initialized");

  // Check if local 0 != NULL
  // If the receiver is null then it is OK to jump to the slow path.
  // (The regular method entry will raise the NullPointerException.)
  __ ldr(Rthis, Address(Rparams));
  __ cbz(Rthis, slow_path);

  // Preserve LR: the decorated load below may emit barrier calls that
  // clobber it, and we still need the return address for the _areturn.
  __ mov(Rret_addr, LR);

  // Load the value of the referent field.
  // ON_WEAK_OOP_REF routes the load through the GC's Access barrier so the
  // collector can observe the weak reference read (e.g. G1's SATB logging).
  const Address field_address(Rthis, referent_offset);
  __ load_heap_oop(R0, field_address, Rtemp, R1_tmp, R2_tmp, ON_WEAK_OOP_REF);

  // _areturn
  // Restore the caller's SP and return with the referent left in R0.
  __ mov(SP, Rsender_sp);
  __ ret(Rret_addr);

  // generate a vanilla interpreter entry as the slow path
  __ bind(slow_path);
  __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
  return entry;
}
903
904 // Not supported
905 address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
906 address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
907 address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
908
909 //
910 // Interpreter stub for calling a native method. (asm interpreter)
911 // This sets up a somewhat different looking stack for calling the native method
912 // than the typical interpreter frame setup.
913 //
914
915 address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
916 // determine code generation flags
917 bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
918
919 // Incoming registers:
920 //
921 // Rmethod: Method*
|