/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != rthread && metadata_result != rthread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;

  mov(c_rarg0, rthread);
  set_num_rt_args(0); // Nothing on stack

  Label retaddr;
  set_last_Java_frame(sp, rfp, retaddr, rscratch1);

  // do the call
  lea(rscratch1, RuntimeAddress(entry));
  blrt(rscratch1, args_size + 1, 8, 1);
  bind(retaddr);
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  push(r0, sp);
  { Label L;
    get_thread(r0);
    cmp(rthread, r0);
    br(Assembler::EQ, L);
    stop("StubAssembler::call_RT: rthread not callee saved?");
    bind(L);
  }
  pop(r0, sp);
#endif
  reset_last_Java_frame(true);
  maybe_isb();

  // check for pending exceptions
  { Label L;
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
    cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_offset()));
    }
    if (metadata_result->is_valid()) {
      str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    }
    if (frame_size() == no_frame_size) {
      leave();
      far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, rthread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, rthread);
  }
  return call_offset;
}
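
// A typical call site (cf. the new_instance_id case in generate_code_for
// below) pairs the offset returned by call_RT with an oop map of the saved
// registers, roughly:
//
//   OopMap* map = save_live_registers(sasm);
//   int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps = new OopMapSet();
//   oop_maps->add_gc_map(call_offset, map);
//
// call_offset is taken right after the blrt, i.e. at the return address,
// which is the PC that add_gc_map must describe for the safepoint.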


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  mov(c_rarg1, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      mov(rscratch1, arg1);
      mov(arg1, arg2);
      mov(arg2, rscratch1);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    stp(arg3, arg2, Address(pre(sp, -2 * wordSize)));
    stp(arg1, zr, Address(pre(sp, -2 * wordSize)));
    ldp(c_rarg1, zr, Address(post(sp, 2 * wordSize)));
    ldp(c_rarg3, c_rarg2, Address(post(sp, 2 * wordSize)));
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
  return call_RT(oop_result1, metadata_result, entry, 3);
}
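
// Illustrative example of the conflict case above: if, say, arg1 arrives in
// c_rarg3 and arg3 arrives in c_rarg1, no ordering of plain register moves
// avoids clobbering one value before it is read (the moves form a cycle).
// Spilling everything and reloading into the C argument registers, as done
// above, sidesteps the ordering problem entirely:
//
//   stp(arg3, arg2, pre(sp, -16));  stp(arg1, zr, pre(sp, -16));      // spill
//   ldp(c_rarg1, zr, post(sp, 16)); ldp(c_rarg3, c_rarg2, post(sp, 16));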

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};

void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
  set_info(name, must_gc_arguments);
  enter();
}

void StubAssembler::epilogue() {
  leave();
  ret(lr);
}

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ prologue(name, must_gc_arguments);
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  __ load_parameter(offset_in_words, reg);
}
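
// For example, the store_flattened_array stub below reads back three values
// that the compiled caller passed via LIR_Assembler::store_parameter:
//
//   f.load_argument(2, r0); // r0: array
//   f.load_argument(1, r1); // r1: index
//   f.load_argument(0, r2); // r2: value
//
// The indices here must agree with the offsets used on the store_parameter
// side, as the note above says.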


StubFrame::~StubFrame() {
  __ epilogue();
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization)
// Note that users of this frame may well have arguments to some runtime call
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//

enum reg_save_layout {
  reg_save_frame_size = 32 /* float */ + 32 /* integer */
};

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them.  The
// deopt blob is the only thing which needs to describe FPU registers.
// In all other cases it should be sufficient to simply save their
// current value.

static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
static int reg_save_size_in_words;
static int frame_size_in_bytes = -1;

static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  int frame_size_in_bytes = reg_save_frame_size * BytesPerWord;
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (i <= 18 && i != rscratch1->encoding() && i != rscratch2->encoding()) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      {
        int sp_offset = fpu_reg_save_offsets[i];
        oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                  r->as_VMReg());
      }
    }
  }
  return oop_map;
}

static OopMap* save_live_registers(StubAssembler* sasm,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ push(RegSet::range(r0, r29), sp);         // integer registers except lr & sp

  if (save_fpu_registers) {
    for (int i = 31; i >= 0; i -= 4) {
      __ sub(sp, sp, 4 * wordSize); // no pre-increment for st1. Emulate it without modifying other registers
      __ st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
          as_FloatRegister(i), __ T1D, Address(sp));
    }
  } else {
    __ add(sp, sp, -32 * wordSize);
  }

  return generate_oop_map(sasm, save_fpu_registers);
}
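
// Note on the st1/ld1 asymmetry above and below: the multiple-structure forms
// of st1/ld1 only support post-index writeback. Restoring can therefore pop
// naturally with ld1 ... post(sp, 4 * wordSize), while saving has to allocate
// the space with an explicit sub before each st1, i.e. a hand-rolled
// pre-index.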

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 4)
      __ ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
          as_FloatRegister(i+3), __ T1D, Address(__ post(sp, 4 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

  __ pop(RegSet::range(r0, r29), sp);
}

static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {

  if (restore_fpu_registers) {
    for (int i = 0; i < 32; i += 4)
      __ ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
          as_FloatRegister(i+3), __ T1D, Address(__ post(sp, 4 * wordSize)));
  } else {
    __ add(sp, sp, 32 * wordSize);
  }

  __ ldp(zr, r1, Address(__ post(sp, 16)));
  __ pop(RegSet::range(r2, r29), sp);
}



void Runtime1::initialize_pd() {
  int i;
  int sp_offset = 0;

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }

  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    cpu_reg_save_offsets[i] = sp_offset;
    sp_offset += 2;   // SP offsets are in halfwords
  }
}
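
// Given the loops above, and assuming the usual aarch64 register counts
// (32 FPU and 32 CPU registers), the resulting slot assignment should be:
// v0..v31 at halfword offsets 0, 2, ..., 62, followed by the CPU registers
// at 64, 66, ..., 126, matching the 32 + 32 words of reg_save_frame_size.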


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs arguments (passed in rscratch1 and rscratch2)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    __ mov(c_rarg1, rscratch1);
    __ mov(c_rarg2, rscratch2);
    call_offset = __ call_RT(noreg, noreg, target);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}
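
// Example uses from generate_code_for below: throw_div0_exception_id calls
// this with has_argument == false, while throw_class_cast_exception_id passes
// true, relying on the throwing site having placed the argument in rscratch1
// (copied into c_rarg1/c_rarg2 above before the runtime call).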


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = r0;
  const Register exception_pc  = r3;
  // other registers used in this stub

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into r0
    __ ldr(exception_oop, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into r3
    __ ldr(exception_pc, Address(rfp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
    __ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (r0) and
    // exception pc (lr) are dead.
    const int frame_size = 2 /*fp, return address*/;
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    break;
  }
  default:
    __ should_not_reach_here();
    break;
  }

  // verify that only r0 and r3 are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that r0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ str(exception_oop, Address(rthread, JavaThread::exception_oop_offset()));
  __ str(exception_pc, Address(rthread, JavaThread::exception_pc_offset()));

  // patch throwing pc into return address (has bci & oop map)
  __ str(exception_pc, Address(rfp, 1*BytesPerWord));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // r0: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we looked
  //      up the handler, regardless of whether a handler existed in the nmethod.

  // only r0 is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ str(r0, Address(rfp, 1*BytesPerWord));

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // Pop the return address.
    __ leave();
    __ ret(lr);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}
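
// Three kinds of entry reach generate_handle_exception (see the first switch
// above): forward_exception_id arrives with registers already saved in the
// standard places; handle_exception[_nofpu]_id saves the live registers here;
// and handle_exception_from_callee_id arrives with only r0 and lr live, so a
// bare fp/return-address frame is enough.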


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = r0;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = r19;
  // other registers used in this stub
  const Register exception_pc = r3;
  const Register handler_addr = r1;

  // verify that only r0 is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  Label oop_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_oop_offset()));
  __ cbz(rscratch1, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
  __ cbz(rscratch1, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // Save our return address because
  // exception_handler_for_return_address will destroy it.  We also
  // save exception_oop
  __ stp(lr, exception_oop, Address(__ pre(sp, -2 * wordSize)));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rthread, lr);
  // r0: exception handler address of the caller

  // Only r0 is valid at this time; all other registers have been
  // destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ mov(handler_addr, r0);

  // get throwing pc (= return address).
  // lr has been destroyed by the call
  __ ldp(lr, exception_oop, Address(__ post(sp, 2 * wordSize)));
  __ mov(r3, lr);

  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // r0: exception oop
  // r3: throwing pc
  // r1: exception handler
  __ br(handler_addr);
}



OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
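// The code below distinguishes three outcomes after the runtime call: a
// pending exception with r0 == 0 is forwarded to forward_exception; a pending
// exception with r0 != 0 means the nmethod was deoptimized during patching,
// so the exception goes to the deopt blob's exception-in-tls entry; and with
// no pending exception, r0 != 0 still forces a reexecute deopt, otherwise we
// return normally to the (now patched) caller.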
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm);

  __ mov(c_rarg0, rthread);
  Label retaddr;
  __ set_last_Java_frame(sp, rfp, retaddr, rscratch1);
  // do the call
  __ lea(rscratch1, RuntimeAddress(target));
  __ blrt(rscratch1, 1, 0, 1);
  __ bind(retaddr);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  { Label L;
    __ get_thread(rscratch1);
    __ cmp(rthread, rscratch1);
    __ br(Assembler::EQ, L);
    __ stop("StubAssembler::call_RT: rthread not callee saved?");
    __ bind(L);
  }
#endif
  __ reset_last_Java_frame(true);
  __ maybe_isb();

  // check for pending exceptions
  { Label L;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, L);
    // exception pending => remove activation and forward to exception handler

    { Label L1;
      __ cbnz(r0, L1);                                  // have we deoptimized?
      __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
      __ bind(L1);
    }

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ ldr(r0, Address(rthread, Thread::pending_exception_offset()));
    __ str(zr, Address(rthread, Thread::pending_exception_offset()));

    // check that there is really a valid exception
    __ verify_not_null_oop(r0);

    // load throwing pc: this is the return address of the stub
    __ mov(r3, lr);

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ldr(rscratch1, Address(rthread, Thread::pending_exception_offset()));
    __ cbz(rscratch1, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ldr(rscratch1, Address(rthread, JavaThread::exception_pc_offset()));
    __ cbz(rscratch1, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ str(r0, Address(rthread, JavaThread::exception_oop_offset()));
    __ str(r3, Address(rthread, JavaThread::exception_pc_offset()));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack.  A patch may
    // have values live in registers, so we use the entry point with the
    // exception in tls.
    __ far_jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label cont;

  __ cbz(r0, cont);                                 // have we deoptimized?

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(lr);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  const Register exception_oop = r0;
  const Register exception_pc  = r3;

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  OopMap* oop_map = NULL;
  switch (id) {
    {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = r3; // Incoming
        Register obj   = r0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            !UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Label slow_path;
          Register obj_size = r2;
          Register t1       = r19;
          Register t2       = r4;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ stp(r19, zr, Address(__ pre(sp, -2 * wordSize)));

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ldrb(rscratch1, Address(klass, InstanceKlass::init_state_offset()));
            __ cmpw(rscratch1, InstanceKlass::fully_initialized);
            __ br(Assembler::NE, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmp(obj_size, (u1)0);
            __ br(Assembler::LE, not_ok);  // make sure it's an instance (LH > 0)
            __ tstw(obj_size, Klass::_lh_instance_slow_path_bit);
            __ br(Assembler::EQ, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // get the instance size (size is positive, so a 32-bit load is fine on 64-bit)
          __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false);
          __ verify_oop(obj);
          __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
          __ ret(lr);

          __ bind(slow_path);
          __ ldp(r19, zr, Address(__ post(sp, 2 * wordSize)));
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = r0, method = r1;
        __ enter();
        OopMap* map = save_live_registers(sasm);
        // Retrieve bci
        __ ldrw(bci, Address(rfp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ ldr(method, Address(rfp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(lr);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
    case new_value_array_id:
      {
        Register length   = r19; // Incoming
        Register klass    = r3; // Incoming
        Register obj      = r0; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else if (id == new_object_array_id) {
          __ set_info("new_object_array", dont_gc_arguments);
        } else {
          __ set_info("new_value_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
          __ asrw(t0, t0, Klass::_lh_array_tag_shift);

          int tag = 0;
          switch (id) {
           case new_type_array_id: tag = Klass::_lh_array_tag_type_value; break;
           case new_object_array_id: tag = Klass::_lh_array_tag_obj_value; break;
           case new_value_array_id: tag = Klass::_lh_array_tag_vt_value; break;
           default:  ShouldNotReachHere();
          }
          __ mov(rscratch1, tag);
          __ cmpw(t0, rscratch1);
          __ br(Assembler::EQ, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        // If TLAB is disabled, see if there is support for inlining contiguous
        // allocations.
        // Otherwise, just go to the slow path.
        if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) {
          Register arr_size = r4;
          Register t1       = r2;
          Register t2       = r5;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ mov(rscratch1, C1_MacroAssembler::max_array_allocation_length);
          __ cmpw(length, rscratch1);
          __ br(Assembler::HI, slow_path);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive, ldrw does the right thing on 64-bit
          __ ldrw(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive, movw does the right thing on 64-bit
          __ movw(arr_size, length);
          __ lslvw(arr_size, length, t1);
          __ ubfx(t1, t1, Klass::_lh_header_size_shift,
                  exact_log2(Klass::_lh_header_size_mask + 1));
          __ add(arr_size, arr_size, t1);
          __ add(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
          __ andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
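          // In C terms, the computation above is roughly (helper names are
          // illustrative, not real accessors):
          //   arr_size  = length << (layout_helper & 0x1F);          // data
          //   arr_size += (layout_helper >> hdr_shift) & hdr_mask;   // header
          //   arr_size  = align_up(arr_size, MinObjAlignmentInBytes);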

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ ldrb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andr(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);

          __ ret(lr);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          // Runtime1::new_object_array handles both object and value arrays
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(lr);

        // r0: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // r0: klass
        // r19: rank
        // r2: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        __ mov(c_rarg1, r0);
        __ mov(c_rarg3, r2);
        __ mov(c_rarg2, r19);
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), r1, r2, r3);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: new multi array
        __ verify_oop(r0);
      }
      break;

    case buffer_value_args_id:
    case buffer_value_args_no_receiver_id:
      {
        const char* name = (id == buffer_value_args_id) ?
          "buffer_value_args" : "buffer_value_args_no_receiver";
        StubFrame f(sasm, name, dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2);
        Register method = r1;
        address entry = (id == buffer_value_args_id) ?
          CAST_FROM_FN_PTR(address, buffer_value_args) :
          CAST_FROM_FN_PTR(address, buffer_value_args_no_receiver);
        int call_offset = __ call_RT(r0, noreg, entry, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
        __ verify_oop(r0);  // r0: an array of buffered value objects
      }
      break;

    case load_flattened_array_id:
      {
        StubFrame f(sasm, "load_flattened_array", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: array
        f.load_argument(0, r1); // r1: index
        int call_offset = __ call_RT(r0, noreg, CAST_FROM_FN_PTR(address, load_flattened_array), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: loaded element at array[index]
        __ verify_oop(r0);
      }
      break;

    case store_flattened_array_id:
      {
        StubFrame f(sasm, "store_flattened_array", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 4);

        // Called with store_parameter and not C abi

        f.load_argument(2, r0); // r0: array
        f.load_argument(1, r1); // r1: index
        f.load_argument(0, r2); // r2: value
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, store_flattened_array), r0, r1, r2);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);
      }
      break;

    case substitutability_check_id:
      {
        StubFrame f(sasm, "substitutability_check", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: left operand
        f.load_argument(0, r1); // r1: right operand
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, substitutability_check), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r0(sasm);

        // r0: whether the two operands are substitutable
      }
      break;



    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C abi locations

        __ verify_oop(c_rarg0);

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = r5;
        __ load_klass(t, r0);
        __ ldrw(t, Address(t, Klass::access_flags_offset()));
        __ tbnz(t, exact_log2(JVM_ACC_HAS_FINALIZER), register_finalizer);
        __ ret(lr);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), r0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(lr);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_change_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case throw_illegal_monitor_state_exception_id:
      { StubFrame f(sasm, "throw_illegal_monitor_state_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_illegal_monitor_state_exception), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ bl(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        enum layout {
          r0_off, r0_off_hi,
          r2_off, r2_off_hi,
          r4_off, r4_off_hi,
          r5_off, r5_off_hi,
          sup_k_off, sup_k_off_hi,
          klass_off, klass_off_hi,
          framesize,
          result_off = sup_k_off
        };
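
        // Note: result_off aliases sup_k_off, so the slot where the caller
        // pushed the superclass is overwritten with the result (1 = subtype,
        // 0 = miss), which the caller pops after this stub returns.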

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(RegSet::of(r0, r2, r4, r5), sp);

        // This is called by pushing args and not with C abi
        // __ ldr(r4, Address(sp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        // __ ldr(r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        __ ldp(r4, r0, Address(sp, (sup_k_off) * VMRegImpl::stack_slot_size));

        Label miss;
        __ check_klass_subtype_slow_path(r4, r0, r2, r5, NULL, &miss);

        // fallthrough on success:
        __ mov(rscratch1, 1);
        __ str(rscratch1, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);

        __ bind(miss);
        __ str(zr, Address(sp, (result_off) * VMRegImpl::stack_slot_size)); // result
        __ pop(RegSet::of(r0, r2, r4, r5), sp);
        __ ret(lr);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, r0); // r0: object
        f.load_argument(0, r1); // r1: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), r0, r1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, r0); // r0: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), r0);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        OopMap* oop_map = save_live_registers(sasm);
        f.load_argument(0, c_rarg1);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), c_rarg1);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case predicate_failed_trap_id:
      {
        StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ far_jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    default:
      // DMS CHECK: This code should be fixed in the JDK workspace, because it
      // fails with an assert during VM initialization rather than inserting a
      // call to unimplemented_entry.
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ mov(r0, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
        __ should_not_reach_here();
      }
      break;
    }
  }


  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) { Unimplemented(); return 0; }