/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif

// Implementation of StubAssembler

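// call_RT calls a C++ runtime entry point from stub code: it records the
// last Java frame, passes the current thread in Z_ARG1, performs the call,
// and checks for pending exceptions on return. The returned offset marks
// the call site so the caller can register an oop map for it.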
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
  // it into the frame anchor.
  address pc = get_PC(Z_R1_scratch);
  int call_offset = (int)(pc - addr_at(0));
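  // call_offset is relative to the start of the stub's code; callers pass
  // it to OopMapSet::add_gc_map() as the gc map point of this runtime call.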
  set_last_Java_frame(Z_SP, Z_R1_scratch);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = NULL;
  align_call_far_patchable(this->pc());
  return_pc = call_c_opt(entry_point);
  assert(return_pc != NULL, "const section overflow");

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to conditionally jump to forward_exception. However, after
    // relocation the branch might not reach its target anymore. So we jump
    // around the far branch instead, which always reaches.

    Label ok;
    z_bre(ok); // bcondEqual is the same as bcondZero.

    // Exception pending => forward to exception handler.

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(jlong));
    }
    if (metadata_result->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(jlong));
    }
    if (frame_size() == no_frame_size) {
      // Pop the stub frame.
      pop_frame();
      restore_return_pc();
      load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
      z_br(Z_R1);
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      load_const_optimized(Z_R1, Runtime1::entry_for (Runtime1::forward_exception_id));
      z_br(Z_R1);
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  assert(arg3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg3);
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

#ifndef PRODUCT
#undef  __
#define __ (Verbose ? (sasm->block_comment(FILE_AND_LINE),sasm):sasm)->
#endif // !PRODUCT

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

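// The helpers below push and pop the register save frame used by the stubs
// in this file. The frame size (in words) is recorded in the StubAssembler
// so that oop maps and stack walking agree with the layout chosen here.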
static OopMap* generate_oop_map(StubAssembler* sasm) {
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::generate_oop_map(sasm, reg_set);
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true, Register return_pc = Z_R14) {
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set =
    save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
  if (!save_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("save_live_registers_except_r2");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
  int frame_size_in_slots =
      RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set);
}

static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
  __ block_comment("save_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  RegisterSaver::RegisterSet reg_set =
    restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (!restore_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("restore_live_registers_except_r2");
  RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}

static void restore_volatile_registers(StubAssembler* sasm) {
  __ block_comment("restore_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

void Runtime1::initialize_pd() {
  // Nothing to do.
}

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // Incoming parameters: Z_EXC_OOP and Z_EXC_PC.
  // Keep copies in callee-saved registers during runtime call.
  const Register exception_oop_callee_saved = Z_R11;
  const Register exception_pc_callee_saved = Z_R12;
  // Other registers used in this stub.
  const Register handler_addr = Z_R4;

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);

  // Check that fields in JavaThread for exception oop and issuing pc are set.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and pc in callee-saved registers to preserve them
  // during the runtime call.
  __ verify_not_null_oop(Z_EXC_OOP);
  __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP);
  __ lgr_if_needed(exception_pc_callee_saved, Z_EXC_PC);

  __ push_frame_abi160(0); // Runtime code needs the z_abi_160.

  // Search the exception handler address of the caller (using the return address).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Z_thread, Z_EXC_PC);
  // Z_RET (Z_R2): exception handler address of the caller.

  __ pop_frame();

  __ invalidate_registers(exception_oop_callee_saved, exception_pc_callee_saved, Z_RET);

  // Move result of call into correct register.
  __ lgr_if_needed(handler_addr, Z_RET);

  // Restore exception oop and pc to Z_EXC_OOP and Z_EXC_PC (required convention of exception handler).
  __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);
  __ lgr_if_needed(Z_EXC_PC, exception_pc_callee_saved);

  // Verify that there is really a valid exception in Z_EXC_OOP.
  __ verify_not_null_oop(Z_EXC_OOP);

  __ z_br(handler_addr); // Jump to exception handler.
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // Re-execute the patched instruction or, if the nmethod was
  // deoptimized, return to the deoptimization handler entry that will
  // cause re-execution of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  __ z_ltr(Z_RET, Z_RET); // return value == 0?

  restore_live_registers(sasm);

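  // Note: restore_live_registers() only performs loads, which do not change
  // the condition code on z/Architecture, so the CC set by z_ltr above is
  // still valid for the conditional return below.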
  __ z_bcr(Assembler::bcondZero, Z_R14);

  // Return to the deoptimization handler entry for unpacking and reexecution.
  // If we simply returned, we'd deopt as if any call we patched had just
  // returned.
  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ load_const_optimized(Z_R1_scratch, dest);
  __ z_br(Z_R1_scratch);

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Default value; overwritten for some optimized stubs that are
  // called from methods that do not use the fpu.
  bool save_fpu_registers = true;

  // Stub code and info for the different stubs.
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // will not return
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass    = Z_R11; // Incoming
        Register obj      = Z_R2;  // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case counter_overflow_id:
      {
        // Arguments :
        //   bci    : stack param 0
        //   method : stack param 1
        //
        Register bci = Z_ARG2, method = Z_ARG3;
        OopMap* map = save_live_registers(sasm);
        const int frame_size = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size; // Frame size in bytes.
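        // The incoming stack parameters live above the register save frame
        // just pushed, hence the frame_size displacement below.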
        __ z_lg(bci,    0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(method, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ z_br(Z_R14);
      }
      break;
    case new_type_array_id:
    case new_object_array_id:
      {
        Register length   = Z_R13; // Incoming
        Register klass    = Z_R11; // Incoming
        Register obj      = Z_R2;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          NearLabel ok;
          Register t0 = obj;
          __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
          __ z_sra(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3: klass
        // Z_R4: rank
        // Z_R5: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(Z_R2, noreg, CAST_FROM_FN_PTR(address, new_multi_array), Z_R3, Z_R4, Z_R5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        // Z_R2: new multi array
        __ verify_oop(Z_R2);
        __ z_br(Z_R14);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Load the klass and check the has-finalizer flag.
        Register klass = Z_ARG2;
        __ load_klass(klass, Z_ARG1);
        __ testbit(Address(klass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
        __ z_bcr(Assembler::bcondAllZero, Z_R14); // Return if bit is not set.

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), Z_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers.
        restore_live_registers(sasm);

        __ z_br(Z_R14);
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;
    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;
    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stubframe since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;
    case throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;
    case throw_class_cast_exception_id:
    { // Z_R1_scratch: object
      __ set_info("throw_class_cast_exception", dont_gc_arguments);
      oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
    }
    break;
    case throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
    case slow_subtype_check_id:
    {
      // Arguments :
      //   sub  : stack param 0
      //   super: stack param 1
      //   raddr: Z_R14, blown by call
      //
      // Result : condition code 0 for match (bcondEqual will be true),
      //          condition code 2 for miss  (bcondNotEqual will be true)
      NearLabel miss;
      const Register Rsubklass   = Z_ARG2; // sub
      const Register Rsuperklass = Z_ARG3; // super

      // No args, but tmp registers that are killed.
      const Register Rlength     = Z_ARG4; // cache array length
      const Register Rarray_ptr  = Z_ARG5; // Current value from cache array.

      if (UseCompressedOops) {
        assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
      }

      const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
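      // The frame holds the four registers saved below on top of the
      // standard z/Architecture ABI save area.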
      // Save return pc. This is not necessary, but could be helpful
      // in the case of crashes.
      __ save_return_pc();
      __ push_frame(frame_size);
      // Save registers before changing them.
      int i = 0;
      __ z_stg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_stg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_stg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_stg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");

      // Get sub and super from stack.
      __ z_lg(Rsubklass,   0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
      __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);

      __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, NULL, &miss);

      // Match falls through here.
      i = 0;
      __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
      __ pop_frame();
      // Return pc is still in Z_R14.
      __ clear_reg(Z_R0_scratch);         // Zero indicates a match. Set CC 0 (bcondEqual will be true).
      __ z_br(Z_R14);

      __ BIND(miss);
      i = 0;
      __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
      __ pop_frame();
      // Return pc is still in Z_R14.
      __ load_const_optimized(Z_R0_scratch, 1); // One indicates a miss.
      __ z_ltgr(Z_R0_scratch, Z_R0_scratch);    // Set CC 2 (bcondNotEqual will be true).
      __ z_br(Z_R14);
    }
    break;
    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // Z_R1_scratch : object
        // Z_R13        : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

        bool save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), Z_R1_scratch, Z_R13);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // Z_R1_scratch : lock address
        // Note: really a leaf routine but must set up last java sp
        //   => use call_RT for now (speed can be improved by
        //      doing last java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        bool save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), Z_R1_scratch);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ load_const_optimized(Z_R1_scratch, dest);
        __ z_br(Z_R1_scratch);
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // We should set up register map.
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
#if 0
    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there.
        // Preserve rsi and rcx.
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // Check for NaN.
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit.
        __ movptr(rax, result_low_word);
        // Testing of high bits.
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
        // return max_jlong
        __ mov64(rax, CONST64(0x7fffffffffffffff));
        __ jmp(do_return);

        __ bind(return_min_jlong);
        __ mov64(rax, UCONST64(0x8000000000000000));
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
        __ xorptr(rax, rax);

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#endif // TODO

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
      { // Z_R1_scratch: previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ should_not_reach_here(FILE_AND_LINE);
          break;
        }

        __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);

        Register pre_val = Z_R1_scratch;
        Register tmp  = Z_R6; // Must be non-volatile because it is used to save pre_val.
        Register tmp2 = Z_R7;

        Label refill, restart, marking_not_active;
        int satb_q_active_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_active());
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_buf());

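        // SATB queue fields used below: _active says whether marking is on,
        // _index is a byte offset into the buffer that counts down (0 means
        // the buffer is full), and _buf is the buffer base address.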
        // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
        __ z_stg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

        // Is marking still active?
        if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
          __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
        } else {
          assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
          __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
        }
        __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.

        __ bind(restart);
        // Load the index into the SATB buffer. SATBMarkQueue::_index is a
        // size_t so z_ltg is appropriate here.
        __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);

        // index == 0?
        __ z_brz(refill);

        __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
        __ add2reg(tmp, -oopSize);

        __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := pre_val
        __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);

        __ bind(marking_not_active);
        // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
        __ z_lg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_br(Z_R14);

        __ bind(refill);
        save_volatile_registers(sasm);
        __ z_lgr(tmp, pre_val); // Save pre_val; tmp is non-volatile.
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
                        Z_thread);
        __ z_lgr(pre_val, tmp); // Restore pre_val.
        restore_volatile_registers(sasm);
        __ z_bru(restart);
      }
      break;

    case g1_post_barrier_slow_id:
      { // Z_R1_scratch: oop address, address of updated memory slot
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ should_not_reach_here(FILE_AND_LINE);
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr_oop  = Z_R1_scratch;
        Register addr_card = Z_R1_scratch;
        Register r1        = Z_R6; // Must be saved/restored.
        Register r2        = Z_R7; // Must be saved/restored.
        Register cardtable = r1;   // Must be non-volatile, because it is used to save addr_card.
        jbyte* byte_map_base = ci_card_table_address();

        // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
        __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

        Label not_already_dirty, restart, refill, young_card;

        // Calculate address of card corresponding to the updated oop slot.
        AddressLiteral rs(byte_map_base);
        __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
        addr_oop = noreg; // dead now
        __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
        __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable

        __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
        __ z_bre(young_card);

        __ z_sync(); // Required to support concurrent cleaning.

        __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
        __ z_brne(not_already_dirty);

        __ bind(young_card);
        // Either the card is young or it is already dirty: restore the
        // used register and return.
        __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_br(Z_R14);

        // Not dirty.
        __ bind(not_already_dirty);

        // First, dirty it: [addr_card] := 0
        __ z_mvi(0, addr_card, CardTable::dirty_card_val());

        Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
        Register buf = r2;
        cardtable = noreg; // now dead

        // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
        __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

        ByteSize dirty_card_q_index_byte_offset =
          JavaThread::dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_index();
        ByteSize dirty_card_q_buf_byte_offset =
          JavaThread::dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_buf();

        __ bind(restart);

        // Get the index into the update buffer. DirtyCardQueue::_index is
        // a size_t so z_ltg is appropriate here.
        __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));

        // index == 0?
        __ z_brz(refill);

        __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
        __ add2reg(idx, -oopSize);

        __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
        __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
        // Restore killed registers and return.
        __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_br(Z_R14);

        __ bind(refill);
        save_volatile_registers(sasm);
        __ z_lgr(idx, addr_card); // Save addr_card; idx is non-volatile.
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
                                         Z_thread);
        __ z_lgr(addr_card, idx); // Restore addr_card.
        restore_volatile_registers(sasm);
        __ z_bru(restart);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
        __ z_br(Z_R1_scratch);
      }
      break;

    default:
      {
        __ should_not_reach_here(FILE_AND_LINE, id);
      }
      break;
  }
  return oop_maps;
}

OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // Incoming parameters: Z_EXC_OOP, Z_EXC_PC.

  // Save registers if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  Register reg_fp = Z_R1_scratch;
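  // reg_fp marks the frame whose z_abi_160 return_pc slot must hold the
  // throwing pc; it is only consumed by verification code below
  // (invalidate_registers() and the ASSERT check on the throwing pc).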

  switch (id) {
    case forward_exception_id: {
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm);

      // Load the pending exception oop into Z_EXC_OOP and clear the field.
      __ z_lg(Z_EXC_OOP, Address(Z_thread, Thread::pending_exception_offset()));
      __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), 8);

      // Different stubs forward their exceptions; they should all have similar frame layouts
      // (a) to find their return address, (b) for a correct oop_map generated above.
      assert(RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers) ==
             RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers_except_r2), "requirement");

      // Load issuing PC (the return address for this stub).
      const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
      __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_abi16(return_pc)));
      DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));)

      // Make sure that the vm_results are cleared (may be unnecessary).
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_offset()),   sizeof(oop));
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(Metadata*));
      break;
    }
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Z_EXC_PC);
      break;
    case handle_exception_from_callee_id: {
      // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      __ save_return_pc(Z_EXC_PC);
      const int frame_size_in_bytes = __ push_frame_abi160(0);
      oop_map = new OopMap(frame_size_in_bytes / VMRegImpl::stack_slot_size, 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      break;
    }
    default:  ShouldNotReachHere();
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC, reg_fp);
  // Verify that Z_EXC_OOP contains a valid exception.
  __ verify_not_null_oop(Z_EXC_OOP);

  // Check that fields in JavaThread for exception oop and issuing pc
  // are empty before writing to them.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and issuing pc into JavaThread.
  // (The exception handler will load them from here.)
  __ z_stg(Z_EXC_OOP, Address(Z_thread, JavaThread::exception_oop_offset()));
  __ z_stg(Z_EXC_PC, Address(Z_thread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  { NearLabel ok;
    __ z_cg(Z_EXC_PC, Address(reg_fp, _z_abi16(return_pc)));
    __ branch_optimized(Assembler::bcondEqual, ok);
    __ stop("use throwing pc as return address (has bci & oop map)");
    __ bind(ok);
  }
#endif

  // Compute the exception handler.
  // The exception oop and the throwing pc are read from the fields in JavaThread.
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Z_RET (Z_R2): handler address. This will be the deopt blob if the
  //   nmethod was deoptimized while we looked up the handler, regardless of
  //   whether a handler existed in the nmethod.

  // Only Z_R2 is valid at this time; all other registers have been destroyed by the runtime call.
  __ invalidate_registers(Z_R2);

  switch (id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // Restore the registers that were saved at the beginning.
      __ z_lgr(Z_R1_scratch, Z_R2);   // Restoring live registers kills Z_R2.
      restore_live_registers(sasm, id != handle_exception_nofpu_id);  // Pops the frame as well.
      __ z_br(Z_R1_scratch);
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ z_br(Z_R2); // Jump to exception handler.
    }
    break;
    default:  ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}