1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2012, 2014 SAP AG. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "code/debugInfoRec.hpp"
29 #include "code/icBuffer.hpp"
30 #include "code/vtableStubs.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "interpreter/interp_masm.hpp"
33 #include "oops/compiledICHolder.hpp"
34 #include "prims/jvmtiRedefineClassesTrace.hpp"
35 #include "runtime/sharedRuntime.hpp"
36 #include "runtime/vframeArray.hpp"
37 #include "vmreg_ppc.inline.hpp"
38 #ifdef COMPILER1
39 #include "c1/c1_Runtime1.hpp"
40 #endif
41 #ifdef COMPILER2
42 #include "adfiles/ad_ppc_64.hpp"
43 #include "opto/runtime.hpp"
44 #endif
45
46 #define __ masm->
47
48 #ifdef PRODUCT
49 #define BLOCK_COMMENT(str) // nothing
50 #else
177 RegisterSaver_LiveIntReg( R10 ),
178 RegisterSaver_LiveIntReg( R11 ),
179 RegisterSaver_LiveIntReg( R12 ),
180 //RegisterSaver_LiveIntReg( R13 ), // system thread id
181 RegisterSaver_LiveIntReg( R14 ),
182 RegisterSaver_LiveIntReg( R15 ),
183 RegisterSaver_LiveIntReg( R16 ),
184 RegisterSaver_LiveIntReg( R17 ),
185 RegisterSaver_LiveIntReg( R18 ),
186 RegisterSaver_LiveIntReg( R19 ),
187 RegisterSaver_LiveIntReg( R20 ),
188 RegisterSaver_LiveIntReg( R21 ),
189 RegisterSaver_LiveIntReg( R22 ),
190 RegisterSaver_LiveIntReg( R23 ),
191 RegisterSaver_LiveIntReg( R24 ),
192 RegisterSaver_LiveIntReg( R25 ),
193 RegisterSaver_LiveIntReg( R26 ),
194 RegisterSaver_LiveIntReg( R27 ),
195 RegisterSaver_LiveIntReg( R28 ),
196 RegisterSaver_LiveIntReg( R29 ),
197 RegisterSaver_LiveIntReg( R31 ),
198 RegisterSaver_LiveIntReg( R30 ), // r30 must be the last register
199 };
200
201 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
202 int* out_frame_size_in_bytes,
203 bool generate_oop_map,
204 int return_pc_adjustment,
205 ReturnPCLocation return_pc_location) {
206 // Push an abi_reg_args-frame and store all registers which may be live.
207 // If requested, create an OopMap: Record volatile registers as
208 // callee-save values in an OopMap so their save locations will be
209 // propagated to the RegisterMap of the caller frame during
210 // StackFrameStream construction (needed for deoptimization; see
211 // compiledVFrame::create_stack_value).
212 // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
213
214 int i;
215 int offset;
216
217 // calculate frame size
218 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
219 sizeof(RegisterSaver::LiveRegType);
220 const int register_save_size = regstosave_num * reg_size;
221 const int frame_size_in_bytes = round_to(register_save_size, frame::alignment_in_bytes)
222 + frame::abi_reg_args_size;
223 *out_frame_size_in_bytes = frame_size_in_bytes;
224 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
225 const int register_save_offset = frame_size_in_bytes - register_save_size;
226
227 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
228 OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;
229
230 BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
231
232 // Save r30 in the last slot of the not yet pushed frame so that we
233 // can use it as scratch reg.
234 __ std(R30, -reg_size, R1_SP);
235 assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
236 "consistency check");
237
238 // save the flags
239 // Do the save_LR_CR by hand and adjust the return pc if requested.
240 __ mfcr(R30);
241 __ std(R30, _abi(cr), R1_SP);
242 switch (return_pc_location) {
243 case return_pc_is_lr: __ mflr(R30); break;
244 case return_pc_is_r4: __ mr(R30, R4); break;
245 case return_pc_is_thread_saved_exception_pc:
246 __ ld(R30, thread_(saved_exception_pc)); break;
247 default: ShouldNotReachHere();
248 }
249 if (return_pc_adjustment != 0)
250 __ addi(R30, R30, return_pc_adjustment);
251 __ std(R30, _abi(lr), R1_SP);
252
253 // push a new frame
254 __ push_frame(frame_size_in_bytes, R30);
255
256 // save all registers (ints and floats)
257 offset = register_save_offset;
258 for (int i = 0; i < regstosave_num; i++) {
259 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
260 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
261
262 switch (reg_type) {
263 case RegisterSaver::int_reg: {
264 if (reg_num != 30) { // We spilled R30 right at the beginning.
265 __ std(as_Register(reg_num), offset, R1_SP);
266 }
267 break;
268 }
269 case RegisterSaver::float_reg: {
270 __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
271 break;
272 }
273 case RegisterSaver::special_reg: {
274 if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
275 __ mfctr(R30);
276 __ std(R30, offset, R1_SP);
277 } else {
278 Unimplemented();
279 }
280 break;
281 }
282 default:
283 ShouldNotReachHere();
284 }
285
286 if (generate_oop_map) {
287 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
288 RegisterSaver_LiveRegs[i].vmreg);
289 map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
290 RegisterSaver_LiveRegs[i].vmreg->next());
291 }
292 offset += reg_size;
293 }
294
295 BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");
296
304 void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
305 int frame_size_in_bytes,
306 bool restore_ctr) {
307 int i;
308 int offset;
309 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
310 sizeof(RegisterSaver::LiveRegType);
311 const int register_save_size = regstosave_num * reg_size;
312 const int register_save_offset = frame_size_in_bytes - register_save_size;
313
314 BLOCK_COMMENT("restore_live_registers_and_pop_frame {");
315
316 // restore all registers (ints and floats)
317 offset = register_save_offset;
318 for (int i = 0; i < regstosave_num; i++) {
319 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
320 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
321
322 switch (reg_type) {
323 case RegisterSaver::int_reg: {
324 if (reg_num != 30) // R30 restored at the end, it's the tmp reg!
325 __ ld(as_Register(reg_num), offset, R1_SP);
326 break;
327 }
328 case RegisterSaver::float_reg: {
329 __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
330 break;
331 }
332 case RegisterSaver::special_reg: {
333 if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
334 if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
335 __ ld(R30, offset, R1_SP);
336 __ mtctr(R30);
337 }
338 } else {
339 Unimplemented();
340 }
341 break;
342 }
343 default:
344 ShouldNotReachHere();
345 }
346 offset += reg_size;
347 }
348
349 // pop the frame
350 __ pop_frame();
351
352 // restore the flags
353 __ restore_LR_CR(R30);
354
355 // restore scratch register's value
356 __ ld(R30, -reg_size, R1_SP);
357
358 BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
359 }
360
361 void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
362 int frame_size,int total_args, const VMRegPair *regs,
363 const VMRegPair *regs2) {
364 __ push_frame(frame_size, r_temp);
365 int st_off = frame_size - wordSize;
366 for (int i = 0; i < total_args; i++) {
367 VMReg r_1 = regs[i].first();
368 VMReg r_2 = regs[i].second();
369 if (!r_1->is_valid()) {
370 assert(!r_2->is_valid(), "");
371 continue;
372 }
373 if (r_1->is_Register()) {
374 Register r = r_1->as_Register();
375 __ std(r, st_off, R1_SP);
376 st_off -= wordSize;
2004 __ cmpdi(CCR0, R3_ARG1, 0);
2005 __ beq(CCR0, ic_miss);
2006 __ verify_oop(R3_ARG1);
2007 __ load_klass(receiver_klass, R3_ARG1);
2008
2009 __ cmpd(CCR0, receiver_klass, ic);
2010 __ bne(CCR0, ic_miss);
2011 }
2012
2013
2014 // Generate the Verified Entry Point (VEP).
2015 // --------------------------------------------------------------------------
2016 vep_start_pc = (intptr_t)__ pc();
2017
2018 __ save_LR_CR(r_temp_1);
2019 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2020 __ mr(r_callers_sp, R1_SP); // Remember frame pointer.
2021 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame.
2022 frame_done_pc = (intptr_t)__ pc();
2023
2024 // Native nmethod wrappers never take possession of the oop arguments.
2025 // So the caller will gc the arguments.
2026 // The only thing we need an oopMap for is if the call is static.
2027 //
2028 // An OopMap for lock (and class if static), and one for the VM call itself.
2029 OopMapSet *oop_maps = new OopMapSet();
2030 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2031
2032 if (is_critical_native) {
2033 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset, oop_maps, in_regs, in_sig_bt, r_temp_1);
2034 }
2035
2036 // Move arguments from register/stack to register/stack.
2037 // --------------------------------------------------------------------------
2038 //
2039 // We immediately shuffle the arguments so that for any vm call we have
2040 // to make from here on out (sync slow path, jvmti, etc.) we will have
2041 // captured the oops from our caller and have a valid oopMap for them.
2042 //
2043 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2577 oop_maps);
2578
2579 if (is_critical_native) {
2580 nm->set_lazy_critical_native(true);
2581 }
2582
2583 return nm;
2584 #else
2585 ShouldNotReachHere();
2586 return NULL;
2587 #endif // COMPILER2
2588 }
2589
2590 // This function returns the adjust size (in number of words) to a c2i adapter
2591 // activation for use during deoptimization.
2592 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2593 return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
2594 }
2595
2596 uint SharedRuntime::out_preserve_stack_slots() {
2597 #ifdef COMPILER2
2598 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
2599 #else
2600 return 0;
2601 #endif
2602 }
2603
2604 #ifdef COMPILER2
2605 // Frame generation for deopt and uncommon trap blobs.
2606 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
2607 /* Read */
2608 Register unroll_block_reg,
2609 /* Update */
2610 Register frame_sizes_reg,
2611 Register number_of_frames_reg,
2612 Register pcs_reg,
2613 /* Invalidate */
2614 Register frame_size_reg,
2615 Register pc_reg) {
2616
2617 __ ld(pc_reg, 0, pcs_reg);
2850 __ reset_last_Java_frame();
2851 // Save the return value.
2852 __ mr(unroll_block_reg, R3_RET);
2853
2854 // Restore only the result registers that have been saved
2855 // by save_volatile_registers(...).
2856 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
2857
2858 // In excp_deopt_mode, restore and clear exception oop which we
2859 // stored in the thread during exception entry above. The exception
2860 // oop will be the return value of this stub.
2861 Label skip_restore_excp;
2862 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
2863 __ bne(CCR0, skip_restore_excp);
2864 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2865 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2866 __ li(R0, 0);
2867 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2868 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2869 __ BIND(skip_restore_excp);
2870
2871 // Reload narrow_oop_base.
2872 if (UseCompressedOops && Universe::narrow_oop_base() != 0) {
2873 __ load_const_optimized(R30, Universe::narrow_oop_base());
2874 }
2875
2876 __ pop_frame();
2877
2878 // stack: (deoptee, optional i2c, caller of deoptee, ...).
2879
2880 // pop the deoptee's frame
2881 __ pop_frame();
2882
2883 // stack: (caller_of_deoptee, ...).
2884
2885 // Loop through the `UnrollBlock' info and create interpreter frames.
2886 push_skeleton_frames(masm, true/*deopt*/,
2887 unroll_block_reg,
2888 R23_tmp3,
2889 R24_tmp4,
2890 R25_tmp5,
2891 R26_tmp6,
2892 R27_tmp7);
2893
2894 // stack: (skeletal interpreter frame, ..., optional skeletal
|
1 /*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2012, 2015 SAP AG. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "code/debugInfoRec.hpp"
29 #include "code/icBuffer.hpp"
30 #include "code/vtableStubs.hpp"
31 #include "frame_ppc.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interp_masm.hpp"
34 #include "oops/compiledICHolder.hpp"
35 #include "prims/jvmtiRedefineClassesTrace.hpp"
36 #include "runtime/sharedRuntime.hpp"
37 #include "runtime/vframeArray.hpp"
38 #include "vmreg_ppc.inline.hpp"
39 #ifdef COMPILER1
40 #include "c1/c1_Runtime1.hpp"
41 #endif
42 #ifdef COMPILER2
43 #include "adfiles/ad_ppc_64.hpp"
44 #include "opto/runtime.hpp"
45 #endif
46
47 #define __ masm->
48
49 #ifdef PRODUCT
50 #define BLOCK_COMMENT(str) // nothing
51 #else
178 RegisterSaver_LiveIntReg( R10 ),
179 RegisterSaver_LiveIntReg( R11 ),
180 RegisterSaver_LiveIntReg( R12 ),
181 //RegisterSaver_LiveIntReg( R13 ), // system thread id
182 RegisterSaver_LiveIntReg( R14 ),
183 RegisterSaver_LiveIntReg( R15 ),
184 RegisterSaver_LiveIntReg( R16 ),
185 RegisterSaver_LiveIntReg( R17 ),
186 RegisterSaver_LiveIntReg( R18 ),
187 RegisterSaver_LiveIntReg( R19 ),
188 RegisterSaver_LiveIntReg( R20 ),
189 RegisterSaver_LiveIntReg( R21 ),
190 RegisterSaver_LiveIntReg( R22 ),
191 RegisterSaver_LiveIntReg( R23 ),
192 RegisterSaver_LiveIntReg( R24 ),
193 RegisterSaver_LiveIntReg( R25 ),
194 RegisterSaver_LiveIntReg( R26 ),
195 RegisterSaver_LiveIntReg( R27 ),
196 RegisterSaver_LiveIntReg( R28 ),
197 RegisterSaver_LiveIntReg( R29 ),
198 RegisterSaver_LiveIntReg( R30 ),
199 RegisterSaver_LiveIntReg( R31 ), // must be the last register (see save/restore functions below)
200 };
201
202 OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssembler* masm,
203 int* out_frame_size_in_bytes,
204 bool generate_oop_map,
205 int return_pc_adjustment,
206 ReturnPCLocation return_pc_location) {
207 // Push an abi_reg_args-frame and store all registers which may be live.
208 // If requested, create an OopMap: Record volatile registers as
209 // callee-save values in an OopMap so their save locations will be
210 // propagated to the RegisterMap of the caller frame during
211 // StackFrameStream construction (needed for deoptimization; see
212 // compiledVFrame::create_stack_value).
213 // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
214
215 int i;
216 int offset;
217
218 // calculate frame size
219 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
220 sizeof(RegisterSaver::LiveRegType);
221 const int register_save_size = regstosave_num * reg_size;
222 const int frame_size_in_bytes = round_to(register_save_size, frame::alignment_in_bytes)
223 + frame::abi_reg_args_size;
224 *out_frame_size_in_bytes = frame_size_in_bytes;
225 const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
226 const int register_save_offset = frame_size_in_bytes - register_save_size;
227
228 // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
229 OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;
230
231 BLOCK_COMMENT("push_frame_reg_args_and_save_live_registers {");
232
233 // Save r31 in the last slot of the not yet pushed frame so that we
234 // can use it as scratch reg.
235 __ std(R31, -reg_size, R1_SP);
236 assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
237 "consistency check");
238
239 // save the flags
240 // Do the save_LR_CR by hand and adjust the return pc if requested.
241 __ mfcr(R31);
242 __ std(R31, _abi(cr), R1_SP);
243 switch (return_pc_location) {
244 case return_pc_is_lr: __ mflr(R31); break;
245 case return_pc_is_r4: __ mr(R31, R4); break;
246 case return_pc_is_thread_saved_exception_pc:
247 __ ld(R31, thread_(saved_exception_pc)); break;
248 default: ShouldNotReachHere();
249 }
250 if (return_pc_adjustment != 0) {
251 __ addi(R31, R31, return_pc_adjustment);
252 }
253 __ std(R31, _abi(lr), R1_SP);
254
255 // push a new frame
256 __ push_frame(frame_size_in_bytes, R31);
257
258 // save all registers (ints and floats)
259 offset = register_save_offset;
260 for (int i = 0; i < regstosave_num; i++) {
261 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
262 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
263
264 switch (reg_type) {
265 case RegisterSaver::int_reg: {
266 if (reg_num != 31) { // We spilled R31 right at the beginning.
267 __ std(as_Register(reg_num), offset, R1_SP);
268 }
269 break;
270 }
271 case RegisterSaver::float_reg: {
272 __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
273 break;
274 }
275 case RegisterSaver::special_reg: {
276 if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
277 __ mfctr(R31);
278 __ std(R31, offset, R1_SP);
279 } else {
280 Unimplemented();
281 }
282 break;
283 }
284 default:
285 ShouldNotReachHere();
286 }
287
288 if (generate_oop_map) {
289 map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
290 RegisterSaver_LiveRegs[i].vmreg);
291 map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
292 RegisterSaver_LiveRegs[i].vmreg->next());
293 }
294 offset += reg_size;
295 }
296
297 BLOCK_COMMENT("} push_frame_reg_args_and_save_live_registers");
298
306 void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
307 int frame_size_in_bytes,
308 bool restore_ctr) {
309 int i;
310 int offset;
311 const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
312 sizeof(RegisterSaver::LiveRegType);
313 const int register_save_size = regstosave_num * reg_size;
314 const int register_save_offset = frame_size_in_bytes - register_save_size;
315
316 BLOCK_COMMENT("restore_live_registers_and_pop_frame {");
317
318 // restore all registers (ints and floats)
319 offset = register_save_offset;
320 for (int i = 0; i < regstosave_num; i++) {
321 int reg_num = RegisterSaver_LiveRegs[i].reg_num;
322 int reg_type = RegisterSaver_LiveRegs[i].reg_type;
323
324 switch (reg_type) {
325 case RegisterSaver::int_reg: {
326 if (reg_num != 31) // R31 restored at the end, it's the tmp reg!
327 __ ld(as_Register(reg_num), offset, R1_SP);
328 break;
329 }
330 case RegisterSaver::float_reg: {
331 __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
332 break;
333 }
334 case RegisterSaver::special_reg: {
335 if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
336 if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
337 __ ld(R31, offset, R1_SP);
338 __ mtctr(R31);
339 }
340 } else {
341 Unimplemented();
342 }
343 break;
344 }
345 default:
346 ShouldNotReachHere();
347 }
348 offset += reg_size;
349 }
350
351 // pop the frame
352 __ pop_frame();
353
354 // restore the flags
355 __ restore_LR_CR(R31);
356
357 // restore scratch register's value
358 __ ld(R31, -reg_size, R1_SP);
359
360 BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
361 }
362
363 void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
364 int frame_size,int total_args, const VMRegPair *regs,
365 const VMRegPair *regs2) {
366 __ push_frame(frame_size, r_temp);
367 int st_off = frame_size - wordSize;
368 for (int i = 0; i < total_args; i++) {
369 VMReg r_1 = regs[i].first();
370 VMReg r_2 = regs[i].second();
371 if (!r_1->is_valid()) {
372 assert(!r_2->is_valid(), "");
373 continue;
374 }
375 if (r_1->is_Register()) {
376 Register r = r_1->as_Register();
377 __ std(r, st_off, R1_SP);
378 st_off -= wordSize;
2006 __ cmpdi(CCR0, R3_ARG1, 0);
2007 __ beq(CCR0, ic_miss);
2008 __ verify_oop(R3_ARG1);
2009 __ load_klass(receiver_klass, R3_ARG1);
2010
2011 __ cmpd(CCR0, receiver_klass, ic);
2012 __ bne(CCR0, ic_miss);
2013 }
2014
2015
2016 // Generate the Verified Entry Point (VEP).
2017 // --------------------------------------------------------------------------
2018 vep_start_pc = (intptr_t)__ pc();
2019
2020 __ save_LR_CR(r_temp_1);
2021 __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
2022 __ mr(r_callers_sp, R1_SP); // Remember frame pointer.
2023 __ push_frame(frame_size_in_bytes, r_temp_1); // Push the c2n adapter's frame.
2024 frame_done_pc = (intptr_t)__ pc();
2025
2026 __ verify_thread();
2027
2028 // Native nmethod wrappers never take possession of the oop arguments.
2029 // So the caller will gc the arguments.
2030 // The only thing we need an oopMap for is if the call is static.
2031 //
2032 // An OopMap for lock (and class if static), and one for the VM call itself.
2033 OopMapSet *oop_maps = new OopMapSet();
2034 OopMap *oop_map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2035
2036 if (is_critical_native) {
2037 check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset, oop_maps, in_regs, in_sig_bt, r_temp_1);
2038 }
2039
2040 // Move arguments from register/stack to register/stack.
2041 // --------------------------------------------------------------------------
2042 //
2043 // We immediately shuffle the arguments so that for any vm call we have
2044 // to make from here on out (sync slow path, jvmti, etc.) we will have
2045 // captured the oops from our caller and have a valid oopMap for them.
2046 //
2047 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2581 oop_maps);
2582
2583 if (is_critical_native) {
2584 nm->set_lazy_critical_native(true);
2585 }
2586
2587 return nm;
2588 #else
2589 ShouldNotReachHere();
2590 return NULL;
2591 #endif // COMPILER2
2592 }
2593
2594 // This function returns the adjust size (in number of words) to a c2i adapter
2595 // activation for use during deoptimization.
2596 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2597 return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
2598 }
2599
2600 uint SharedRuntime::out_preserve_stack_slots() {
2601 #if defined(COMPILER1) || defined(COMPILER2)
2602 return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
2603 #else
2604 return 0;
2605 #endif
2606 }
2607
2608 #ifdef COMPILER2
2609 // Frame generation for deopt and uncommon trap blobs.
2610 static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
2611 /* Read */
2612 Register unroll_block_reg,
2613 /* Update */
2614 Register frame_sizes_reg,
2615 Register number_of_frames_reg,
2616 Register pcs_reg,
2617 /* Invalidate */
2618 Register frame_size_reg,
2619 Register pc_reg) {
2620
2621 __ ld(pc_reg, 0, pcs_reg);
2854 __ reset_last_Java_frame();
2855 // Save the return value.
2856 __ mr(unroll_block_reg, R3_RET);
2857
2858 // Restore only the result registers that have been saved
2859 // by save_volatile_registers(...).
2860 RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
2861
2862 // In excp_deopt_mode, restore and clear exception oop which we
2863 // stored in the thread during exception entry above. The exception
2864 // oop will be the return value of this stub.
2865 Label skip_restore_excp;
2866 __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
2867 __ bne(CCR0, skip_restore_excp);
2868 __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2869 __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2870 __ li(R0, 0);
2871 __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
2872 __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
2873 __ BIND(skip_restore_excp);
2874
2875 __ pop_frame();
2876
2877 // stack: (deoptee, optional i2c, caller of deoptee, ...).
2878
2879 // pop the deoptee's frame
2880 __ pop_frame();
2881
2882 // stack: (caller_of_deoptee, ...).
2883
2884 // Loop through the `UnrollBlock' info and create interpreter frames.
2885 push_skeleton_frames(masm, true/*deopt*/,
2886 unroll_block_reg,
2887 R23_tmp3,
2888 R24_tmp4,
2889 R25_tmp5,
2890 R26_tmp6,
2891 R27_tmp7);
2892
2893 // stack: (skeletal interpreter frame, ..., optional skeletal
|