19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Defs.hpp"
27 #include "c1/c1_MacroAssembler.hpp"
28 #include "c1/c1_Runtime1.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "nativeInst_sparc.hpp"
31 #include "oops/compiledICHolder.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/jvmtiExport.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/signature.hpp"
36 #include "runtime/vframeArray.hpp"
37 #include "utilities/macros.hpp"
38 #include "vmreg_sparc.inline.hpp"
39 #if INCLUDE_ALL_GCS
40 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
41 #endif
42
43 // Implementation of StubAssembler
44
45 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
46 // for sparc changing the number of arguments doesn't change
47 // anything about the frame size so we'll always lie and claim that
48 // we are only passing 1 argument.
49 set_num_rt_args(1);
50
51 assert_not_delayed();
52 // bang stack before going to runtime
53 set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
54 st(G0, SP, G3_scratch);
55
56 // debugging support
57 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
58
59 set_last_Java_frame(SP, noreg);
60 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
61 save_thread(L7_thread_cache);
122 return call_RT(oop_result1, metadata_result, entry, 1);
123 }
124
125
126 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
127 // O0 is reserved for the thread
128 mov(arg1, O1);
129 mov(arg2, O2); assert(arg2 != O1, "smashed argument");
130 return call_RT(oop_result1, metadata_result, entry, 2);
131 }
132
133
134 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
135 // O0 is reserved for the thread
136 mov(arg1, O1);
137 mov(arg2, O2); assert(arg2 != O1, "smashed argument");
138 mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");
139 return call_RT(oop_result1, metadata_result, entry, 3);
140 }
141
142
143 // Implementation of Runtime1
144
145 #define __ sasm->
146
// Bookkeeping shared by the register save/restore helpers below:
// word offsets from SP at which each CPU / FPU register is captured by
// the stub routines (see the comment in Runtime1::initialize_pd).
static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;      // presumably the register save area size in words -- TODO confirm in initialize_pd
static int frame_size_in_bytes = -1;    // -1 means "not yet computed"; checked by asserts below
151
152 static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
153 assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
154 "mismatch in calculation");
155 sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
156 int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
157 OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
158
159 int i;
160 for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
161 Register r = as_Register(i);
162 if (r == G1 || r == G3 || r == G4 || r == G5) {
163 int sp_offset = cpu_reg_save_offsets[i];
164 oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
165 r->as_VMReg());
166 }
167 }
168
169 if (save_fpu_registers) {
170 for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
171 FloatRegister r = as_FloatRegister(i);
172 int sp_offset = fpu_reg_save_offsets[i];
173 oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
174 r->as_VMReg());
175 }
176 }
177 return oop_map;
178 }
179
180 static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
181 assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
182 "mismatch in calculation");
183 __ save_frame_c1(frame_size_in_bytes);
184
185 // Record volatile registers as callee-save values in an OopMap so their save locations will be
186 // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
187 // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
188 // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
189 // (as the stub's I's) when the runtime routine called by the stub creates its frame.
190 // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))
191
192 int i;
193 for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
194 Register r = as_Register(i);
195 if (r == G1 || r == G3 || r == G4 || r == G5) {
196 int sp_offset = cpu_reg_save_offsets[i];
197 __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
198 }
199 }
200
201 if (save_fpu_registers) {
202 for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
203 FloatRegister r = as_FloatRegister(i);
204 int sp_offset = fpu_reg_save_offsets[i];
205 __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
206 }
207 }
208
209 return generate_oop_map(sasm, save_fpu_registers);
210 }
211
212 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
213 for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
214 Register r = as_Register(i);
215 if (r == G1 || r == G3 || r == G4 || r == G5) {
216 __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
217 }
218 }
219
220 if (restore_fpu_registers) {
221 for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
222 FloatRegister r = as_FloatRegister(i);
223 __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
224 }
225 }
226 }
227
228
229 void Runtime1::initialize_pd() {
230 // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
231 //
232 // A stub routine will have a frame that is at least large enough to hold
233 // a register window save area (obviously) and the volatile g registers
234 // and floating registers. A user of save_live_registers can have a frame
235 // that has more scratch area in it (although typically they will use L-regs).
236 // in that case the frame will look like this (stack growing down)
237 //
238 // FP -> | |
239 // | scratch mem |
240 // | " " |
241 // --------------
242 // | float regs |
243 // | " " |
244 // ---------------
245 // | G regs |
246 // | " " |
247 // ---------------
819 break;
820
821 case dtrace_object_alloc_id:
822 { // O0: object
823 __ set_info("dtrace_object_alloc", dont_gc_arguments);
824 // we can't gc here so skip the oopmap but make sure that all
825 // the live registers get saved.
826 save_live_registers(sasm);
827
828 __ save_thread(L7_thread_cache);
829 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
830 relocInfo::runtime_call_type);
831 __ delayed()->mov(I0, O0);
832 __ restore_thread(L7_thread_cache);
833
834 restore_live_registers(sasm);
835 __ ret();
836 __ delayed()->restore();
837 }
838 break;
839
840 #if INCLUDE_ALL_GCS
841 case g1_pre_barrier_slow_id:
842 { // G4: previous value of memory
843 BarrierSet* bs = Universe::heap()->barrier_set();
844 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
845 __ save_frame(0);
846 __ set((int)id, O1);
847 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
848 __ should_not_reach_here();
849 break;
850 }
851
852 __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
853
854 Register pre_val = G4;
855 Register tmp = G1_scratch;
856 Register tmp2 = G3_scratch;
857
858 Label refill, restart;
859 int satb_q_active_byte_offset =
860 in_bytes(JavaThread::satb_mark_queue_offset() +
861 SATBMarkQueue::byte_offset_of_active());
862 int satb_q_index_byte_offset =
863 in_bytes(JavaThread::satb_mark_queue_offset() +
864 SATBMarkQueue::byte_offset_of_index());
865 int satb_q_buf_byte_offset =
866 in_bytes(JavaThread::satb_mark_queue_offset() +
867 SATBMarkQueue::byte_offset_of_buf());
868
869 // Is marking still active?
870 if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
871 __ ld(G2_thread, satb_q_active_byte_offset, tmp);
872 } else {
873 assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
874 __ ldsb(G2_thread, satb_q_active_byte_offset, tmp);
875 }
876 __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, restart);
877 __ retl();
878 __ delayed()->nop();
879
880 __ bind(restart);
881 // Load the index into the SATB buffer. SATBMarkQueue::_index is a
882 // size_t so ld_ptr is appropriate
883 __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);
884
885 // index == 0?
886 __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);
887
888 __ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
889 __ sub(tmp, oopSize, tmp);
890
891 __ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>
892 // Use return-from-leaf
893 __ retl();
894 __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);
895
896 __ bind(refill);
897
898 save_live_registers(sasm);
899
900 __ call_VM_leaf(L7_thread_cache,
901 CAST_FROM_FN_PTR(address,
902 SATBMarkQueueSet::handle_zero_index_for_thread),
903 G2_thread);
904
905 restore_live_registers(sasm);
906
907 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
908 __ delayed()->restore();
909 }
910 break;
911
912 case g1_post_barrier_slow_id:
913 {
914 BarrierSet* bs = Universe::heap()->barrier_set();
915 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
916 __ save_frame(0);
917 __ set((int)id, O1);
918 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
919 __ should_not_reach_here();
920 break;
921 }
922
923 __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
924
925 Register addr = G4;
926 Register cardtable = G5;
927 Register tmp = G1_scratch;
928 Register tmp2 = G3_scratch;
929 jbyte* byte_map_base = barrier_set_cast<CardTableModRefBS>(bs)->byte_map_base;
930
931 Label not_already_dirty, restart, refill, young_card;
932
933 __ srlx(addr, CardTableModRefBS::card_shift, addr);
934
935 AddressLiteral rs(byte_map_base);
936 __ set(rs, cardtable); // cardtable := <card table base>
937 __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
938
939 __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
940
941 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
942 __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
943
944 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
945 __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
946
947 __ bind(young_card);
948 // We didn't take the branch, so we're already dirty: return.
949 // Use return-from-leaf
950 __ retl();
951 __ delayed()->nop();
952
953 // Not dirty.
954 __ bind(not_already_dirty);
955
956 // Get cardtable + tmp into a reg by itself
957 __ add(addr, cardtable, tmp2);
958
959 // First, dirty it.
960 __ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty).
961
962 Register tmp3 = cardtable;
963 Register tmp4 = tmp;
964
965 // these registers are now dead
966 addr = cardtable = tmp = noreg;
967
968 int dirty_card_q_index_byte_offset =
969 in_bytes(JavaThread::dirty_card_queue_offset() +
970 DirtyCardQueue::byte_offset_of_index());
971 int dirty_card_q_buf_byte_offset =
972 in_bytes(JavaThread::dirty_card_queue_offset() +
973 DirtyCardQueue::byte_offset_of_buf());
974
975 __ bind(restart);
976
977 // Get the index into the update buffer. DirtyCardQueue::_index is
978 // a size_t so ld_ptr is appropriate here.
979 __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);
980
981 // index == 0?
982 __ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);
983
984 __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
985 __ sub(tmp3, oopSize, tmp3);
986
987 __ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card>
988 // Use return-from-leaf
989 __ retl();
990 __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);
991
992 __ bind(refill);
993
994 save_live_registers(sasm);
995
996 __ call_VM_leaf(L7_thread_cache,
997 CAST_FROM_FN_PTR(address,
998 DirtyCardQueueSet::handle_zero_index_for_thread),
999 G2_thread);
1000
1001 restore_live_registers(sasm);
1002
1003 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
1004 __ delayed()->restore();
1005 }
1006 break;
1007 #endif // INCLUDE_ALL_GCS
1008
1009 case predicate_failed_trap_id:
1010 {
1011 __ set_info("predicate_failed_trap", dont_gc_arguments);
1012 OopMap* oop_map = save_live_registers(sasm);
1013
1014 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
1015
1016 oop_maps = new OopMapSet();
1017 oop_maps->add_gc_map(call_offset, oop_map);
1018
1019 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1020 assert(deopt_blob != NULL, "deoptimization blob must have been created");
1021 restore_live_registers(sasm);
1022
1023 AddressLiteral dest(deopt_blob->unpack_with_reexecution());
1024 __ jump_to(dest, O0);
1025 __ delayed()->restore();
1026 }
1027 break;
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Defs.hpp"
27 #include "c1/c1_MacroAssembler.hpp"
28 #include "c1/c1_Runtime1.hpp"
29 #include "interpreter/interpreter.hpp"
30 #include "nativeInst_sparc.hpp"
31 #include "oops/compiledICHolder.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/jvmtiExport.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/signature.hpp"
36 #include "runtime/vframeArray.hpp"
37 #include "utilities/macros.hpp"
38 #include "vmreg_sparc.inline.hpp"
39
40 // Implementation of StubAssembler
41
42 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
43 // for sparc changing the number of arguments doesn't change
44 // anything about the frame size so we'll always lie and claim that
45 // we are only passing 1 argument.
46 set_num_rt_args(1);
47
48 assert_not_delayed();
49 // bang stack before going to runtime
50 set(-os::vm_page_size() + STACK_BIAS, G3_scratch);
51 st(G0, SP, G3_scratch);
52
53 // debugging support
54 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
55
56 set_last_Java_frame(SP, noreg);
57 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
58 save_thread(L7_thread_cache);
119 return call_RT(oop_result1, metadata_result, entry, 1);
120 }
121
122
123 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
124 // O0 is reserved for the thread
125 mov(arg1, O1);
126 mov(arg2, O2); assert(arg2 != O1, "smashed argument");
127 return call_RT(oop_result1, metadata_result, entry, 2);
128 }
129
130
131 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
132 // O0 is reserved for the thread
133 mov(arg1, O1);
134 mov(arg2, O2); assert(arg2 != O1, "smashed argument");
135 mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument");
136 return call_RT(oop_result1, metadata_result, entry, 3);
137 }
138
139 void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
140 set_info(name, must_gc_arguments);
141 }
142
143 void StubAssembler::epilogue() {
144 delayed()->restore();
145 }
146
147 // Implementation of Runtime1
148
149
// Bookkeeping shared by the register save/restore helpers below:
// word offsets from SP at which each CPU / FPU register is captured by
// the stub routines (see the comment in Runtime1::initialize_pd).
static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;      // presumably the register save area size in words -- TODO confirm in initialize_pd
static int frame_size_in_bytes = -1;    // -1 means "not yet computed"; checked by asserts below
154
155 static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
156 assert(frame_size_in_bytes == sasm->total_frame_size_in_bytes(reg_save_size_in_words),
157 "mismatch in calculation");
158 sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
159 int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
160 OopMap* oop_map = new OopMap(frame_size_in_slots, 0);
161
162 int i;
163 for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
164 Register r = as_Register(i);
165 if (r == G1 || r == G3 || r == G4 || r == G5) {
166 int sp_offset = cpu_reg_save_offsets[i];
167 oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
168 r->as_VMReg());
169 }
170 }
171
172 if (save_fpu_registers) {
173 for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
174 FloatRegister r = as_FloatRegister(i);
175 int sp_offset = fpu_reg_save_offsets[i];
176 oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
177 r->as_VMReg());
178 }
179 }
180 return oop_map;
181 }
182
183 #define __ this->
184
185 void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) {
186 assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
187 "mismatch in calculation");
188 __ save_frame_c1(frame_size_in_bytes);
189
190 // Record volatile registers as callee-save values in an OopMap so their save locations will be
191 // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
192 // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
193 // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
194 // (as the stub's I's) when the runtime routine called by the stub creates its frame.
195 // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))
196
197 int i;
198 for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
199 Register r = as_Register(i);
200 if (r == G1 || r == G3 || r == G4 || r == G5) {
201 int sp_offset = cpu_reg_save_offsets[i];
202 __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
203 }
204 }
205
206 if (save_fpu_registers) {
207 for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
208 FloatRegister r = as_FloatRegister(i);
209 int sp_offset = fpu_reg_save_offsets[i];
210 __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
211 }
212 }
213 }
214
215 void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) {
216 for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
217 Register r = as_Register(i);
218 if (r == G1 || r == G3 || r == G4 || r == G5) {
219 __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
220 }
221 }
222
223 if (restore_fpu_registers) {
224 for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
225 FloatRegister r = as_FloatRegister(i);
226 __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
227 }
228 }
229 }
230
231 #undef __
232 #define __ sasm->
233
234 static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
235 sasm->save_live_registers_no_oop_map(save_fpu_registers);
236 return generate_oop_map(sasm, save_fpu_registers);
237 }
238
239 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
240 sasm->restore_live_registers(restore_fpu_registers);
241 }
242
243
244 void Runtime1::initialize_pd() {
245 // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
246 //
247 // A stub routine will have a frame that is at least large enough to hold
248 // a register window save area (obviously) and the volatile g registers
249 // and floating registers. A user of save_live_registers can have a frame
250 // that has more scratch area in it (although typically they will use L-regs).
251 // in that case the frame will look like this (stack growing down)
252 //
253 // FP -> | |
254 // | scratch mem |
255 // | " " |
256 // --------------
257 // | float regs |
258 // | " " |
259 // ---------------
260 // | G regs |
261 // | " " |
262 // ---------------
834 break;
835
836 case dtrace_object_alloc_id:
837 { // O0: object
838 __ set_info("dtrace_object_alloc", dont_gc_arguments);
839 // we can't gc here so skip the oopmap but make sure that all
840 // the live registers get saved.
841 save_live_registers(sasm);
842
843 __ save_thread(L7_thread_cache);
844 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
845 relocInfo::runtime_call_type);
846 __ delayed()->mov(I0, O0);
847 __ restore_thread(L7_thread_cache);
848
849 restore_live_registers(sasm);
850 __ ret();
851 __ delayed()->restore();
852 }
853 break;
854
855 case predicate_failed_trap_id:
856 {
857 __ set_info("predicate_failed_trap", dont_gc_arguments);
858 OopMap* oop_map = save_live_registers(sasm);
859
860 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
861
862 oop_maps = new OopMapSet();
863 oop_maps->add_gc_map(call_offset, oop_map);
864
865 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
866 assert(deopt_blob != NULL, "deoptimization blob must have been created");
867 restore_live_registers(sasm);
868
869 AddressLiteral dest(deopt_blob->unpack_with_reexecution());
870 __ jump_to(dest, O0);
871 __ delayed()->restore();
872 }
873 break;
|