755 case counter_overflow_id:
756 {
757 Register bci = r0, method = r1;
758 __ enter();
759 OopMap* map = save_live_registers(sasm);
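// Frame sketch: the caller passes the bci and the Method* on the stack.
// After enter() saves rfp and lr at [rfp] and [rfp + BytesPerWord], those
// two incoming slots land at [rfp + 2*BytesPerWord] and
// [rfp + 3*BytesPerWord], which the two loads below pick up.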
760 // Retrieve bci
761 __ ldrw(bci, Address(rfp, 2*BytesPerWord));
762 // And a pointer to the Method*
763 __ ldr(method, Address(rfp, 3*BytesPerWord));
764 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
765 oop_maps = new OopMapSet();
766 oop_maps->add_gc_map(call_offset, map);
767 restore_live_registers(sasm);
768 __ leave();
769 __ ret(lr);
770 }
771 break;
772
773 case new_type_array_id:
774 case new_object_array_id:
775 {
776 Register length = r19; // Incoming
777 Register klass = r3; // Incoming
778 Register obj = r0; // Result
779
780 if (id == new_type_array_id) {
781 __ set_info("new_type_array", dont_gc_arguments);
782 } else {
783 __ set_info("new_object_array", dont_gc_arguments);
784 }
785
786 #ifdef ASSERT
787 // assert object type is really an array of the proper kind
788 {
789 Label ok;
790 Register t0 = obj;
791 __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
792 __ asrw(t0, t0, Klass::_lh_array_tag_shift);
793 int tag = ((id == new_type_array_id)
794 ? Klass::_lh_array_tag_type_value
795 : Klass::_lh_array_tag_obj_value);
796 __ mov(rscratch1, tag);
797 __ cmpw(t0, rscratch1);
798 __ br(Assembler::EQ, ok);
799 __ stop("assert(is an array klass)");
800 __ should_not_reach_here();
801 __ bind(ok);
802 }
803 #endif // ASSERT
804
805 // If TLAB is disabled, see if there is support for inlining contiguous
806 // allocations.
807 // Otherwise, just go to the slow path.
808 if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
809 Register arr_size = r4;
810 Register t1 = r2;
811 Register t2 = r5;
812 Label slow_path;
813 assert_different_registers(length, klass, obj, arr_size, t1, t2);
814
815 // check that array length is small enough for fast path.
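// Layout-helper sketch: for arrays, Klass::_layout_helper packs the array
// tag in the top bits (checked in the ASSERT block above), the header size
// in bytes in the _lh_header_size_shift byte, and log2(element size) in
// the low bits; the ldrb below extracts just the header-size byte.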
835 __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
836 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
837 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
838 __ andr(t1, t1, Klass::_lh_header_size_mask);
839 __ sub(arr_size, arr_size, t1); // body length
840 __ add(t1, t1, obj); // body start
841 __ initialize_body(t1, arr_size, 0, t2);
842 __ verify_oop(obj);
843
844 __ ret(lr);
845
846 __ bind(slow_path);
847 }
848
849 __ enter();
850 OopMap* map = save_live_registers(sasm);
851 int call_offset;
852 if (id == new_type_array_id) {
853 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
854 } else {
855 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
856 }
857
858 oop_maps = new OopMapSet();
859 oop_maps->add_gc_map(call_offset, map);
860 restore_live_registers_except_r0(sasm);
861
862 __ verify_oop(obj);
863 __ leave();
864 __ ret(lr);
865
866 // r0: new array
867 }
868 break;
869
870 case new_multi_array_id:
871 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
872 // r0: klass
873 // r19: rank
874 // r2: address of 1st dimension
875 OopMap* map = save_live_registers(sasm);
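// Shuffle the incoming values into C ABI argument registers. r2 must be
// copied into c_rarg3 before r19 is copied into c_rarg2 (an alias of r2),
// or the address of the first dimension would be clobbered.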
876 __ mov(c_rarg1, r0);
877 __ mov(c_rarg3, r2);
878 __ mov(c_rarg2, r19);
879 int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);
880
881 oop_maps = new OopMapSet();
882 oop_maps->add_gc_map(call_offset, map);
883 restore_live_registers_except_r0(sasm);
884
885 // r0: new multi array
886 __ verify_oop(r0);
887 }
888 break;
889
890 case register_finalizer_id:
891 {
892 __ set_info("register_finalizer", dont_gc_arguments);
893
894 // This is called via call_runtime, so the arguments
895 // will be placed in C ABI locations
896
897 __ verify_oop(c_rarg0);
898
899 // load the klass and check the has-finalizer flag
900 Label register_finalizer;
901 Register t = r5;
902 __ load_klass(t, r0);
903 __ ldrw(t, Address(t, Klass::access_flags_offset()));
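// tbnz tests the JVM_ACC_HAS_FINALIZER bit directly, so no separate
// and/cmp pair is needed; objects without a finalizer return immediately.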
904 __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
905 __ ret(lr);
906
907 __ bind(register_finalizer);
908 __ enter();
909 OopMap* oop_map = save_live_registers(sasm);
910 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
911 oop_maps = new OopMapSet();
912 oop_maps->add_gc_map(call_offset, oop_map);
913
914 // Now restore all the live registers
915 restore_live_registers(sasm);
916
917 __ leave();
918 __ ret(lr);
919 }
920 break;
921
922 case throw_class_cast_exception_id:
923 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
924 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
925 }
926 break;
927
928 case throw_incompatible_class_change_error_id:
929 { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
930 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
931 }
932 break;
933
934 case slow_subtype_check_id:
935 {
936 // Typical calling sequence:
937 // __ push(klass_RInfo); // object klass or other subclass
938 // __ push(sup_k_RInfo); // array element klass or other superclass
939 // __ bl(slow_subtype_check);
940 // Note that the subclass is pushed first, and is therefore deepest.
941 enum layout {
942 r0_off, r0_off_hi,
943 r2_off, r2_off_hi,
944 r4_off, r4_off_hi,
945 r5_off, r5_off_hi,
946 sup_k_off, sup_k_off_hi,
947 klass_off, klass_off_hi,
948 framesize,
949 result_off = sup_k_off
950 };
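// The *_off values count 32-bit VMReg slots, two per 64-bit register
// (hence the _hi twins), so framesize is in slots, not bytes. result_off
// aliases sup_k_off: the subtype check presumably writes its result back
// into the superclass argument slot before the registers are popped.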
951
952 __ set_info("slow_subtype_check", dont_gc_arguments);
953 __ push(RegSet::of(r0, r2, r4, r5), sp);
1105 break;
1106
1107 case predicate_failed_trap_id:
1108 {
1109 StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
1110
1111 OopMap* map = save_live_registers(sasm);
1112
1113 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
1114 oop_maps = new OopMapSet();
1115 oop_maps->add_gc_map(call_offset, map);
1116 restore_live_registers(sasm);
1117 __ leave();
1118 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1119 assert(deopt_blob != NULL, "deoptimization blob must have been created");
1120
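// unpack_with_reexecution deoptimizes the caller and reexecutes the
// bytecode whose predicate failed, this time without the speculative
// assumption that was violated.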
1121 __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1122 }
1123 break;
1124
1125
1126 default:
1127 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1128 __ mov(r0, (int)id);
1129 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
1130 __ should_not_reach_here();
1131 }
1132 break;
1133 }
1134 }
1135 return oop_maps;
1136 }
1137
1138 #undef __
1139
1140 const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }
755 case counter_overflow_id:
756 {
757 Register bci = r0, method = r1;
758 __ enter();
759 OopMap* map = save_live_registers(sasm);
760 // Retrieve bci
761 __ ldrw(bci, Address(rfp, 2*BytesPerWord));
762 // And a pointer to the Method*
763 __ ldr(method, Address(rfp, 3*BytesPerWord));
764 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
765 oop_maps = new OopMapSet();
766 oop_maps->add_gc_map(call_offset, map);
767 restore_live_registers(sasm);
768 __ leave();
769 __ ret(lr);
770 }
771 break;
772
773 case new_type_array_id:
774 case new_object_array_id:
775 case new_value_array_id:
776 {
777 Register length = r19; // Incoming
778 Register klass = r3; // Incoming
779 Register obj = r0; // Result
780
781 if (id == new_type_array_id) {
782 __ set_info("new_type_array", dont_gc_arguments);
783 }
784 else if (id == new_object_array_id) {
785 __ set_info("new_object_array", dont_gc_arguments);
786 }
787 else {
788 __ set_info("new_value_array", dont_gc_arguments);
789 }
790
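// new_value_array_id (Valhalla flattened value-type arrays) shares this
// allocation path: only the layout-helper tag checked in the ASSERT block
// differs, and the slow path below reuses the new_object_array entry.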
791 #ifdef ASSERT
792 // assert object type is really an array of the proper kind
793 {
794 Label ok;
795 Register t0 = obj;
796 __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
797 __ asrw(t0, t0, Klass::_lh_array_tag_shift);
798
799 int tag = 0;
800 switch (id) {
801 case new_type_array_id: tag = Klass::_lh_array_tag_type_value; break;
802 case new_object_array_id: tag = Klass::_lh_array_tag_obj_value; break;
803 case new_value_array_id: tag = Klass::_lh_array_tag_vt_value; break;
804 default: ShouldNotReachHere();
805 }
806 __ mov(rscratch1, tag);
807 __ cmpw(t0, rscratch1);
808 __ br(Assembler::EQ, ok);
809 __ stop("assert(is an array klass)");
810 __ should_not_reach_here();
811 __ bind(ok);
812 }
813 #endif // ASSERT
814
815 // If TLAB is disabled, see if there is support for inlining contiguous
816 // allocations.
817 // Otherwise, just go to the slow path.
818 if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
819 Register arr_size = r4;
820 Register t1 = r2;
821 Register t2 = r5;
822 Label slow_path;
823 assert_different_registers(length, klass, obj, arr_size, t1, t2);
824
825 // check that array length is small enough for fast path.
845 __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
846 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
847 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
848 __ andr(t1, t1, Klass::_lh_header_size_mask);
849 __ sub(arr_size, arr_size, t1); // body length
850 __ add(t1, t1, obj); // body start
851 __ initialize_body(t1, arr_size, 0, t2);
852 __ verify_oop(obj);
853
854 __ ret(lr);
855
856 __ bind(slow_path);
857 }
858
859 __ enter();
860 OopMap* map = save_live_registers(sasm);
861 int call_offset;
862 if (id == new_type_array_id) {
863 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
864 } else {
865 // Runtime1::new_object_array handles both object and value arrays
866 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
867 }
868
869 oop_maps = new OopMapSet();
870 oop_maps->add_gc_map(call_offset, map);
871 restore_live_registers_except_r0(sasm);
872
873 __ verify_oop(obj);
874 __ leave();
875 __ ret(lr);
876
877 // r0: new array
878 }
879 break;
880
881 case new_multi_array_id:
882 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
883 // r0: klass
884 // r19: rank
885 // r2: address of 1st dimension
886 OopMap* map = save_live_registers(sasm);
887 __ mov(c_rarg1, r0);
888 __ mov(c_rarg3, r2);
889 __ mov(c_rarg2, r19);
890 int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);
891
892 oop_maps = new OopMapSet();
893 oop_maps->add_gc_map(call_offset, map);
894 restore_live_registers_except_r0(sasm);
895
896 // r0: new multi array
897 __ verify_oop(r0);
898 }
899 break;
900
901 case load_flattened_array_id: // DMS CHECK
902 {
903 StubFrame f(sasm, "load_flattened_array", dont_gc_arguments);
904 OopMap* map = save_live_registers(sasm, 3);
905
906 // Called with store_parameter and not C abi
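// Sketch of the convention: the caller stored the arguments into its
// outgoing stack slots with store_parameter, and f.load_argument(n, reg)
// reads slot n back relative to rfp, above the saved rfp/lr pair.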
907
908 f.load_argument(1, r0); // r0: array
909 f.load_argument(0, r1); // r1: index
910 int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, load_flattened_array), r0, r1);
911
912 oop_maps = new OopMapSet();
913 oop_maps->add_gc_map(call_offset, map);
914 restore_live_registers_except_r0(sasm);
915
916 // r0: loaded element at array[index]
917 __ verify_oop(r0);
918 }
919 break;
920
921 case store_flattened_array_id: // DMS CHECK
922 {
923 StubFrame f(sasm, "store_flattened_array", dont_gc_arguments);
924 OopMap* map = save_live_registers(sasm, 4);
925
926 // Called with store_parameter and not C abi
927
928 f.load_argument(2, r0); // r0: array
929 f.load_argument(1, r1); // r1: index
930 f.load_argument(0, r2); // r2: value
931 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), r0, r1, r2);
932
933 oop_maps = new OopMapSet();
934 oop_maps->add_gc_map(call_offset, map);
935 restore_live_registers_except_r0(sasm);
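// DMS CHECK: call_RT above has no oop result (noreg), so skipping r0 on
// restore is only safe if the caller treats r0 as killed by this stub.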
936 }
937 break;
938
939
940 case register_finalizer_id:
941 {
942 __ set_info("register_finalizer", dont_gc_arguments);
943
944 // This is called via call_runtime, so the arguments
945 // will be placed in C ABI locations
946
947 __ verify_oop(c_rarg0);
948
949 // load the klass and check the has-finalizer flag
950 Label register_finalizer;
951 Register t = r5;
952 __ load_klass(t, r0);
953 __ ldrw(t, Address(t, Klass::access_flags_offset()));
954 __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
955 __ ret(lr);
956
957 __ bind(register_finalizer);
958 __ enter();
959 OopMap* oop_map = save_live_registers(sasm);
960 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
961 oop_maps = new OopMapSet();
962 oop_maps->add_gc_map(call_offset, oop_map);
963
964 // Now restore all the live registers
965 restore_live_registers(sasm);
966
967 __ leave();
968 __ ret(lr);
969 }
970 break;
971
972 case throw_class_cast_exception_id:
973 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
974 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
975 }
976 break;
977
978 case throw_incompatible_class_change_error_id:
979 { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
980 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
981 }
982 break;
983
984 case throw_illegal_monitor_state_exception_id:
985 { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
986 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
987 }
988 break;
989
990 case slow_subtype_check_id:
991 {
992 // Typical calling sequence:
993 // __ push(klass_RInfo); // object klass or other subclass
994 // __ push(sup_k_RInfo); // array element klass or other superclass
995 // __ bl(slow_subtype_check);
996 // Note that the subclass is pushed first, and is therefore deepest.
997 enum layout {
998 r0_off, r0_off_hi,
999 r2_off, r2_off_hi,
1000 r4_off, r4_off_hi,
1001 r5_off, r5_off_hi,
1002 sup_k_off, sup_k_off_hi,
1003 klass_off, klass_off_hi,
1004 framesize,
1005 result_off = sup_k_off
1006 };
1007
1008 __ set_info("slow_subtype_check", dont_gc_arguments);
1009 __ push(RegSet::of(r0, r2, r4, r5), sp);
1161 break;
1162
1163 case predicate_failed_trap_id:
1164 {
1165 StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
1166
1167 OopMap* map = save_live_registers(sasm);
1168
1169 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
1170 oop_maps = new OopMapSet();
1171 oop_maps->add_gc_map(call_offset, map);
1172 restore_live_registers(sasm);
1173 __ leave();
1174 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1175 assert(deopt_blob != NULL, "deoptimization blob must have been created");
1176
1177 __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1178 }
1179 break;
1180
1181 default: // DMS CHECK: we come here with id:0 and id:32 during VM initialization; should that be fixed?
1182 // tty->print_cr("DMS id %d not handled", id);
1183
1184 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1185 __ mov(r0, (int)id);
1186 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
1187 __ should_not_reach_here();
1188 }
1189 break;
1190 }
1191 }
1192
1193
1194 return oop_maps;
1195 }
1196
1197 #undef __
1198
1199 const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }