/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_ppc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
                           address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) ||
         oop_result1 != metadata_result, "registers must be different");

  // Currently no stack banging. We assume that there are enough
  // StackShadowPages (which have been banged in generate_stack_overflow_check)
  // for the stub frame and the runtime frames.

  set_last_Java_frame(R1_SP, noreg);

  // ARG1 must hold the thread address.
  mr(R3_ARG1, R16_thread);

  address return_pc = call_c_with_frame_resize(entry_point, /*No resize, we have a C compatible frame.*/0);

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    ld(R0, in_bytes(Thread::pending_exception_offset()), R16_thread);
    cmpdi(CCR0, R0, 0);

    // This used to conditionally jump to forward_exception, but if the code
    // gets relocated the branch might no longer reach its target. So we
    // branch around the forwarding code instead, which keeps the target
    // always reachable.

    Label ok;
    beq(CCR0, ok);

    // Make sure that the vm results are cleared.
    if (oop_result1->is_valid() || metadata_result->is_valid()) {
      li(R0, 0);
      if (oop_result1->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
      }
      if (metadata_result->is_valid()) {
        std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
      }
    }

    if (frame_size() == no_frame_size) {
      ShouldNotReachHere(); // We always have a frame size.
      //pop_frame(); // pop the stub frame
      //ld(R0, _abi(lr), R1_SP);
      //mtlr(R0);
      //load_const_optimized(R0, StubRoutines::forward_exception_entry());
      //mtctr(R0);
      //bctr();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      // Keep the stub frame for the next call_RT.
      //load_const_optimized(R0, Runtime1::entry_for(Runtime1::forward_exception_id));
      add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(Runtime1::forward_exception_id)));
      mtctr(R0);
      bctr();
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return (int)(return_pc - code_section()->start());
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mr_if_needed(R4_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  mr_if_needed(R4_ARG2, arg1);
  mr_if_needed(R5_ARG3, arg2); assert(arg2 != R4_ARG2, "smashed argument");
  mr_if_needed(R6_ARG4, arg3); assert(arg3 != R4_ARG2 && arg3 != R5_ARG3, "smashed argument");
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int frame_size_in_bytes = -1;

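// Build an OopMap that records, for each register saved by save_live_registers
// below, the stack slot in the stub frame where its value lives.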
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes > frame::abi_reg_args_size, "init");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1), r->as_VMReg()->next());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset>>2), r->as_VMReg());
      oop_map->set_callee_saved(VMRegImpl::stack2reg((sp_offset>>2) + 1), r->as_VMReg()->next());
    }
  }

  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true,
                                   Register ret_pc = noreg, int stack_preserve = 0) {
  if (ret_pc == noreg) {
    ret_pc = R0;
    __ mflr(ret_pc);
  }
  __ std(ret_pc, _abi(lr), R1_SP); // C code needs pc in C1 method.
  __ push_frame(frame_size_in_bytes + stack_preserve, R0);

  // Record volatile registers as callee-save values in an OopMap so
  // their save locations will be propagated to the caller frame's
  // RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint)).

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ std(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}

static void restore_live_registers(StubAssembler* sasm, Register result1, Register result2,
                                   bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r) && r != result1 && r != result2) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ ld(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ lfd(r, sp_offset + STACK_BIAS, R1_SP);
    }
  }

  __ pop_frame();
  __ ld(R0, _abi(lr), R1_SP);
  __ mtlr(R0);
}


void Runtime1::initialize_pd() {
  int i;
  int sp_offset = frame::abi_reg_args_size;

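  // Compute, once at startup, the frame offset at which each register will be
  // saved by save_live_registers(). The save area starts directly above the
  // ABI register-argument save area.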
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (FrameMap::reg_needs_save(r)) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset += BytesPerWord;
    }
  }

  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += BytesPerWord;
  }
  frame_size_in_bytes = align_up(sp_offset, frame::alignment_in_bytes);
}


OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, R4_ARG2);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

static OopMapSet* generate_exception_throw_with_stack_parms(StubAssembler* sasm, address target,
                                                            int stack_parms) {
  // Make a frame and preserve the caller's caller-save registers.
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
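  // The caller passed up to three parameters on the stack just below its SP;
  // load them into the argument registers. The cases intentionally fall
  // through so that all parameters up to stack_parms get loaded.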
  switch (stack_parms) {
    case 3:
    __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
    __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
    __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
    case 0:
    call_offset = __ call_RT(noreg, noreg, target);
    break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}


OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  __ blr();
  return oop_maps;
}

static OopMapSet* stub_call_with_stack_parms(StubAssembler* sasm, Register result, address target,
                                             int stack_parms, bool do_return = true) {
  // Make a frame and preserve the caller's caller-save registers.
  const int parm_size_in_bytes = align_up(stack_parms << LogBytesPerWord, frame::alignment_in_bytes);
  const int padding = parm_size_in_bytes - (stack_parms << LogBytesPerWord);
  OopMap* oop_map = save_live_registers(sasm, true, noreg, parm_size_in_bytes);

  int call_offset = 0;
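  // Same stack-parameter loading as in generate_exception_throw_with_stack_parms
  // above: the cases intentionally fall through.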
  switch (stack_parms) {
    case 3:
    __ ld(R6_ARG4, frame_size_in_bytes + padding + 16, R1_SP);
    case 2:
    __ ld(R5_ARG3, frame_size_in_bytes + padding + 8, R1_SP);
    case 1:
    __ ld(R4_ARG2, frame_size_in_bytes + padding + 0, R1_SP);
    case 0:
    call_offset = __ call_RT(result, noreg, target);
    break;
    default: Unimplemented(); break;
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm, result, noreg);
  if (do_return) __ blr();
  return oop_maps;
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; it returns non-zero if the nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);
  __ cmpdi(CCR0, R3_RET, 0);

  // Re-execute the patched instruction or, if the nmethod was deoptimized,
  // return to the deoptimization handler entry that will cause re-execution
  // of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.

  restore_live_registers(sasm, noreg, noreg);
  // Return if the patching routine returned 0.
  __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

  address stub = deopt_blob->unpack_with_reexecution();
  //__ load_const_optimized(R0, stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
  __ mtctr(R0);
  __ bctr();

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  OopMapSet* oop_maps = NULL;

  // For better readability.
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Stub code & info for the different stubs.
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        // We don't support eden allocation.

        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_instance), R4_ARG2);
      }
      break;

    case counter_overflow_id:
        // Bci and method are on stack.
        oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2);
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          int tag = (id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
          Label ok;
          __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
          __ srawi(R0, R0, Klass::_lh_array_tag_shift);
          __ cmpwi(CCR0, R0, tag);
          __ beq(CCR0, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // We don't support eden allocation.

        if (id == new_type_array_id) {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3);
        } else {
          oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3);
        }
      }
      break;

    case new_multi_array_id:
      {
        // R4: klass
        // R5: rank
        // R6: address of 1st dimension
        __ set_info("new_multi_array", dont_gc_arguments);
        oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_multi_array), R4_ARG2, R5_ARG3, R6_ARG4);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);
        // This code is called via rt_call. Hence, caller-save registers have been saved.
        Register t = R11_scratch1;

        // Load the klass and check the has_finalizer flag.
        __ load_klass(t, R3_ARG1);
        __ lwz(t, in_bytes(Klass::access_flags_offset()), t);
        __ testbitdi(CCR0, R0, t, exact_log2(JVM_ACC_HAS_FINALIZER));
        // Return if has_finalizer bit == 0 (CR0.eq).
        __ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);

        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // Empty dummy frame (no callee-save regs).
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), R3_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        __ pop_frame();
        __ ld(R0, _abi(lr), R1_SP);
        __ mtlr(R0);
        __ blr();
      }
      break;

    case throw_range_check_failed_id:
      {
        __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        __ std(R0, -8, R1_SP); // Pass index on stack.
        oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 1);
      }
      break;

    case throw_index_exception_id:
      {
        __ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded.
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      {
        __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      {
        __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      {
        __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      {
        __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      {
        const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                       Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/,
                       Rexception_save = R31, Rcaller_sp = R30;
        __ set_info("unwind_exception", dont_gc_arguments);

        __ ld(Rcaller_sp, 0, R1_SP);
        __ push_frame_reg_args(0, R0); // dummy frame for C call
        __ mr(Rexception_save, Rexception); // save over C call
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), R16_thread, Rexception_pc);
        __ verify_not_null_oop(Rexception_save);
        __ mtctr(R3_RET);
        __ ld(Rexception_pc, _abi(lr), Rcaller_sp); // return pc
        __ mr(R1_SP, Rcaller_sp); // Pop both frames at once.
        __ mr(Rexception, Rexception_save); // restore
        __ mtlr(Rexception_pc);
        __ bctr();
      }
      break;

    case throw_array_store_exception_id:
      {
        __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      {
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      {
        __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
        const Register sub_klass = R5,
                       super_klass = R4,
                       temp1_reg = R6,
                       temp2_reg = R0;
        __ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg); // returns with CR0.eq if successful
        __ crandc(CCR0, Assembler::equal, CCR0, Assembler::equal); // failed: CR0.ne
        __ blr();
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      {
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), R4_ARG2, R5_ARG3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      {
        // Note: This is really a leaf routine, but it must set up the last Java sp,
        //       so we use call_RT for now (speed can be improved by doing the
        //       last Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        restore_live_registers(sasm, noreg, noreg, save_fpu_registers);
        __ blr();
      }
      break;

    case deoptimize_id:
      {
        __ set_info("deoptimize", dont_gc_arguments);
        __ std(R0, -8, R1_SP); // Pass trap_request on stack.
        oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, deoptimize), 1, /*do_return*/ false);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    case access_field_patching_id:
      {
        __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      {
        __ set_info("load_klass_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      {
        __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      {
        __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case dtrace_object_alloc_id:
      { // O0: object
        __ unimplemented("stub dtrace_object_alloc_id");
        __ set_info("dtrace_object_alloc", dont_gc_arguments);
//        // We can't gc here so skip the oopmap but make sure that all
//        // the live registers get saved.
//        save_live_registers(sasm);
//
//        __ save_thread(L7_thread_cache);
//        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
//                relocInfo::runtime_call_type);
//        __ delayed()->mov(I0, O0);
//        __ restore_thread(L7_thread_cache);
//
//        restore_live_registers(sasm);
//        __ ret();
//        __ delayed()->restore();
      }
      break;

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        restore_live_registers(sasm, noreg, noreg);

        address stub = deopt_blob->unpack_with_reexecution();
        //__ load_const_optimized(R0, stub);
        __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
        __ mtctr(R0);
        __ bctr();
      }
      break;

    default:
      {
        __ set_info("unimplemented entry", dont_gc_arguments);
        __ mflr(R0);
        __ std(R0, _abi(lr), R1_SP);
        __ push_frame(frame::abi_reg_args_size, R0); // empty dummy frame
        sasm->set_frame_size(frame::abi_reg_args_size / BytesPerWord);
        OopMap* oop_map = new OopMap(frame::abi_reg_args_size / sizeof(jint), 0);

        __ load_const_optimized(R4_ARG2, (int)id);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R4_ARG2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
  __ block_comment("generate_handle_exception");

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                 Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/;

  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places. Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found. Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, true);
    // Transfer the pending exception to the exception_oop.
    // Also load the PC, which is typically at SP + frame_size_in_bytes + _abi(lr),
    // but we support additional slots in the frame for parameter passing.
    __ ld(Rexception_pc, 0, R1_SP);
    __ ld(Rexception, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    __ li(R0, 0);
    __ ld(Rexception_pc, _abi(lr), Rexception_pc);
    __ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Rexception_pc);
    break;
  case handle_exception_from_callee_id:
    // At this point all registers except the exception oop and exception pc are dead.
    oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
    sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
    __ std(Rexception_pc, _abi(lr), R1_SP);
    __ push_frame(frame_size_in_bytes, R0);
    break;
  default:  ShouldNotReachHere();
  }

  __ verify_not_null_oop(Rexception);

#ifdef ASSERT
  // Check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them.
  __ ld(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception oop already set", 0x963);
  __ ld(R0, in_bytes(JavaThread::exception_pc_offset() ), R16_thread);
  __ cmpdi(CCR0, R0, 0);
  __ asm_assert_eq("exception pc already set", 0x962);
#endif

  // Save the exception and issuing pc in the thread.
  __ std(Rexception,    in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(Rexception_pc, in_bytes(JavaThread::exception_pc_offset() ), R16_thread);

  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

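  // exception_handler_for_pc returns the exception handler's entry address in
  // R3_RET; dispatch to it via the count register once the saved registers are
  // restored and the stub frame has been removed (see below).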
  __ mtctr(R3_RET);

  // Note: if the nmethod has been deoptimized then, regardless of
  // whether it had a handler or not, we will deoptimize
  // by entering the deopt blob with a pending exception.

  // Restore the registers that were saved at the beginning, remove
  // the frame and jump to the exception handler.
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    restore_live_registers(sasm, noreg, noreg, id != handle_exception_nofpu_id);
    __ bctr();
    break;
  case handle_exception_from_callee_id: {
    __ pop_frame();
    __ ld(Rexception_pc, _abi(lr), R1_SP);
    __ mtlr(Rexception_pc);
    __ bctr();
    break;
  }
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}

#undef __