24
25 #include "precompiled.hpp"
26 #include "c1/c1_Defs.hpp"
27 #include "c1/c1_MacroAssembler.hpp"
28 #include "c1/c1_Runtime1.hpp"
29 #include "ci/ciUtilities.hpp"
30 #include "gc/shared/cardTable.hpp"
31 #include "gc/shared/cardTableBarrierSet.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "nativeInst_s390.hpp"
34 #include "oops/compiledICHolder.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/jvmtiExport.hpp"
37 #include "register_s390.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/signature.hpp"
40 #include "runtime/vframeArray.hpp"
41 #include "utilities/macros.hpp"
42 #include "vmreg_s390.inline.hpp"
43 #include "registerSaver_s390.hpp"
44 #if INCLUDE_ALL_GCS
45 #include "gc/g1/g1BarrierSet.hpp"
46 #include "gc/g1/g1CardTable.hpp"
47 #include "gc/g1/g1ThreadLocalData.hpp"
48 #endif
49
50 // Implementation of StubAssembler
51
52 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
53 set_num_rt_args(0); // Nothing on stack.
54 assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
55
56 // We cannot trust that code generated by the C++ compiler saves R14
57 // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
58 // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
59 // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
60 // it into the frame anchor.
61 address pc = get_PC(Z_R1_scratch);
62 int call_offset = (int)(pc - addr_at(0));
63 set_last_Java_frame(Z_SP, Z_R1_scratch);
64
65 // ARG1 must hold thread address.
66 z_lgr(Z_ARG1, Z_thread);
67
|
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Defs.hpp"
27 #include "c1/c1_MacroAssembler.hpp"
28 #include "c1/c1_Runtime1.hpp"
29 #include "ci/ciUtilities.hpp"
30 #include "gc/shared/cardTable.hpp"
31 #include "gc/shared/cardTableBarrierSet.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "nativeInst_s390.hpp"
34 #include "oops/compiledICHolder.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/jvmtiExport.hpp"
37 #include "register_s390.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/signature.hpp"
40 #include "runtime/vframeArray.hpp"
41 #include "utilities/macros.hpp"
42 #include "vmreg_s390.inline.hpp"
43 #include "registerSaver_s390.hpp"
44
45 // Implementation of StubAssembler
46
47 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
48 set_num_rt_args(0); // Nothing on stack.
49 assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
50
51 // We cannot trust that code generated by the C++ compiler saves R14
52 // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
53 // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
54 // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
55 // it into the frame anchor.
56 address pc = get_PC(Z_R1_scratch);
57 int call_offset = (int)(pc - addr_at(0));
58 set_last_Java_frame(Z_SP, Z_R1_scratch);
59
60 // ARG1 must hold thread address.
61 z_lgr(Z_ARG1, Z_thread);
62
|
172 RegisterSaver::RegisterSet reg_set =
173 save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
174 int frame_size_in_slots =
175 RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
176 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
177 return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
178 }
179
180 static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
181 if (!save_fpu_registers) {
182 __ unimplemented(FILE_AND_LINE);
183 }
184 __ block_comment("save_live_registers");
185 RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
186 int frame_size_in_slots =
187 RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
188 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
189 return RegisterSaver::save_live_registers(sasm, reg_set);
190 }
191
192 static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
193 __ block_comment("save_volatile_registers");
194 RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
195 int frame_size_in_slots =
196 RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
197 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
198 return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
199 }
200
201 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
202 __ block_comment("restore_live_registers");
203 RegisterSaver::RegisterSet reg_set =
204 restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
205 RegisterSaver::restore_live_registers(sasm, reg_set);
206 }
207
208 static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
209 if (!restore_fpu_registers) {
210 __ unimplemented(FILE_AND_LINE);
211 }
212 __ block_comment("restore_live_registers_except_r2");
213 RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
214 }
215
216 static void restore_volatile_registers(StubAssembler* sasm) {
217 __ block_comment("restore_volatile_registers");
218 RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
219 RegisterSaver::restore_live_registers(sasm, reg_set);
220 }
221
// Platform-dependent one-time initialization hook for Runtime1.
// The s390 port needs no extra setup beyond the shared initialization.
void Runtime1::initialize_pd() {
  // Nothing to do.
}
225
// Generate a stub that calls into the VM to throw an exception.
// All live registers are saved first so the runtime can inspect them; the
// runtime call at 'target' is expected to raise, so control never returns
// here (enforced by should_not_reach_here()).
// When 'has_argument' is set, the throw arguments are presumed to have been
// pre-loaded by the caller into Z_R1_scratch/Z_R0_scratch — TODO confirm
// against the stub's call sites.
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  // Register an oop map at the call site so GC can locate the saved registers.
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // The runtime call always throws; falling through would be a bug.
  __ should_not_reach_here();
  return oop_maps;
}
|
167 RegisterSaver::RegisterSet reg_set =
168 save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
169 int frame_size_in_slots =
170 RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
171 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
172 return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
173 }
174
175 static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
176 if (!save_fpu_registers) {
177 __ unimplemented(FILE_AND_LINE);
178 }
179 __ block_comment("save_live_registers");
180 RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
181 int frame_size_in_slots =
182 RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
183 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
184 return RegisterSaver::save_live_registers(sasm, reg_set);
185 }
186
187 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
188 __ block_comment("restore_live_registers");
189 RegisterSaver::RegisterSet reg_set =
190 restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
191 RegisterSaver::restore_live_registers(sasm, reg_set);
192 }
193
194 static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
195 if (!restore_fpu_registers) {
196 __ unimplemented(FILE_AND_LINE);
197 }
198 __ block_comment("restore_live_registers_except_r2");
199 RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
200 }
201
// Platform-dependent one-time initialization hook for Runtime1.
// The s390 port needs no extra setup beyond the shared initialization.
void Runtime1::initialize_pd() {
  // Nothing to do.
}
205
// Generate a stub that calls into the VM to throw an exception.
// All live registers are saved first so the runtime can inspect them; the
// runtime call at 'target' is expected to raise, so control never returns
// here (enforced by should_not_reach_here()).
// When 'has_argument' is set, the throw arguments are presumed to have been
// pre-loaded by the caller into Z_R1_scratch/Z_R0_scratch — TODO confirm
// against the stub's call sites.
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  // Register an oop map at the call site so GC can locate the saved registers.
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // The runtime call always throws; falling through would be a bug.
  __ should_not_reach_here();
  return oop_maps;
}
|
746 __ jmp(do_return);
747
748 __ bind(return_min_jlong);
749 __ mov64(rax, UCONST64(0x8000000000000000));
750 __ jmp(do_return);
751
752 __ bind(return0);
753 __ fpop();
754 __ xorptr(rax, rax);
755
756 __ bind(do_return);
757 __ addptr(rsp, 32);
758 LP64_ONLY(__ pop(rdx);)
759 __ pop(rcx);
760 __ pop(rsi);
761 __ ret(0);
762 }
763 break;
764 #endif // TODO
765
766 #if INCLUDE_ALL_GCS
767 case g1_pre_barrier_slow_id:
768 { // Z_R1_scratch: previous value of memory
769
770 BarrierSet* bs = BarrierSet::barrier_set();
771 if (bs->kind() != BarrierSet::G1BarrierSet) {
772 __ should_not_reach_here(FILE_AND_LINE);
773 break;
774 }
775
776 __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
777
778 Register pre_val = Z_R1_scratch;
779 Register tmp = Z_R6; // Must be non-volatile because it is used to save pre_val.
780 Register tmp2 = Z_R7;
781
782 Label refill, restart, marking_not_active;
783 int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
784 int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
785 int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
786
787 // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
788 __ z_stg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
789 __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
790
791 // Is marking still active?
792 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
793 __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
794 } else {
795 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
796 __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
797 }
798 __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.
799
800 __ bind(restart);
801 // Load the index into the SATB buffer. SATBMarkQueue::_index is a
802 // size_t so ld_ptr is appropriate.
803 __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);
804
805 // index == 0?
806 __ z_brz(refill);
807
808 __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
809 __ add2reg(tmp, -oopSize);
810
811 __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <address_of_card>
812 __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);
813
814 __ bind(marking_not_active);
815 // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
816 __ z_lg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
817 __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
818 __ z_br(Z_R14);
819
820 __ bind(refill);
821 save_volatile_registers(sasm);
822 __ z_lgr(tmp, pre_val); // save pre_val
823 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
824 Z_thread);
825 __ z_lgr(pre_val, tmp); // restore pre_val
826 restore_volatile_registers(sasm);
827 __ z_bru(restart);
828 }
829 break;
830
831 case g1_post_barrier_slow_id:
832 { // Z_R1_scratch: oop address, address of updated memory slot
833 BarrierSet* bs = BarrierSet::barrier_set();
834 if (bs->kind() != BarrierSet::G1BarrierSet) {
835 __ should_not_reach_here(FILE_AND_LINE);
836 break;
837 }
838
839 __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
840
841 Register addr_oop = Z_R1_scratch;
842 Register addr_card = Z_R1_scratch;
843 Register r1 = Z_R6; // Must be saved/restored.
844 Register r2 = Z_R7; // Must be saved/restored.
845 Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
846 jbyte* byte_map_base = ci_card_table_address();
847
848 // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
849 __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
850
851 Label not_already_dirty, restart, refill, young_card;
852
853 // Calculate address of card corresponding to the updated oop slot.
854 AddressLiteral rs(byte_map_base);
855 __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
856 addr_oop = noreg; // dead now
857 __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
858 __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
859
860 __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
861 __ z_bre(young_card);
862
863 __ z_sync(); // Required to support concurrent cleaning.
864
865 __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
866 __ z_brne(not_already_dirty);
867
868 __ bind(young_card);
869 // We didn't take the branch, so we're already dirty: restore
870 // used registers and return.
871 __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
872 __ z_br(Z_R14);
873
874 // Not dirty.
875 __ bind(not_already_dirty);
876
877 // First, dirty it: [addr_card] := 0
878 __ z_mvi(0, addr_card, CardTable::dirty_card_val());
879
880 Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
881 Register buf = r2;
882 cardtable = noreg; // now dead
883
884 // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
885 __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
886
887 ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
888 ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();
889
890 __ bind(restart);
891
892 // Get the index into the update buffer. DirtyCardQueue::_index is
893 // a size_t so z_ltg is appropriate here.
894 __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
895
896 // index == 0?
897 __ z_brz(refill);
898
899 __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
900 __ add2reg(idx, -oopSize);
901
902 __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
903 __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
904 // Restore killed registers and return.
905 __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
906 __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
907 __ z_br(Z_R14);
908
909 __ bind(refill);
910 save_volatile_registers(sasm);
911 __ z_lgr(idx, addr_card); // Save addr_card, tmp3 must be non-volatile.
912 __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
913 Z_thread);
914 __ z_lgr(addr_card, idx);
915 restore_volatile_registers(sasm); // Restore addr_card.
916 __ z_bru(restart);
917 }
918 break;
919 #endif // INCLUDE_ALL_GCS
920 case predicate_failed_trap_id:
921 {
922 __ set_info("predicate_failed_trap", dont_gc_arguments);
923
924 OopMap* map = save_live_registers(sasm);
925
926 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
927 oop_maps = new OopMapSet();
928 oop_maps->add_gc_map(call_offset, map);
929 restore_live_registers(sasm);
930
931 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
932 assert(deopt_blob != NULL, "deoptimization blob must have been created");
933
934 __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
935 __ z_br(Z_R1_scratch);
936 }
937 break;
938
|
726 __ jmp(do_return);
727
728 __ bind(return_min_jlong);
729 __ mov64(rax, UCONST64(0x8000000000000000));
730 __ jmp(do_return);
731
732 __ bind(return0);
733 __ fpop();
734 __ xorptr(rax, rax);
735
736 __ bind(do_return);
737 __ addptr(rsp, 32);
738 LP64_ONLY(__ pop(rdx);)
739 __ pop(rcx);
740 __ pop(rsi);
741 __ ret(0);
742 }
743 break;
744 #endif // TODO
745
746 case predicate_failed_trap_id:
747 {
748 __ set_info("predicate_failed_trap", dont_gc_arguments);
749
750 OopMap* map = save_live_registers(sasm);
751
752 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
753 oop_maps = new OopMapSet();
754 oop_maps->add_gc_map(call_offset, map);
755 restore_live_registers(sasm);
756
757 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
758 assert(deopt_blob != NULL, "deoptimization blob must have been created");
759
760 __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
761 __ z_br(Z_R1_scratch);
762 }
763 break;
764
|