/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif

// Implementation of StubAssembler

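// call_RT transfers control from stub code to the C++ runtime. It captures
// the current PC and publishes it via set_last_Java_frame(), passes Z_thread
// as the implicit first argument, and checks for a pending exception after
// the call, forwarding it if one is set. The returned call_offset is the
// offset (within the stub) of the captured PC that was recorded in the frame
// anchor; callers use it as the key when registering oop maps.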
int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
  // it into the frame anchor.
  address pc = get_PC(Z_R1_scratch);
  int call_offset = (int)(pc - addr_at(0));
  set_last_Java_frame(Z_SP, Z_R1_scratch);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = NULL;
  align_call_far_patchable(this->pc());
  return_pc = call_c_opt(entry_point);
  assert(return_pc != NULL, "const section overflow");

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to jump to forward_exception conditionally. However, after
    // relocation the conditional branch might no longer reach its target,
    // so we branch around an unconditional jump, which always reaches.

    Label ok;
    z_bre(ok); // Bcondequal is the same as bcondZero.

    // exception pending => forward to exception handler

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(jlong));
    }
    if (metadata_result->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(jlong));
    }
    if (frame_size() == no_frame_size) {
      // Pop the stub frame.
      pop_frame();
      restore_return_pc();
      load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
      z_br(Z_R1);
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      load_const_optimized(Z_R1, Runtime1::entry_for (Runtime1::forward_exception_id));
      z_br(Z_R1);
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  assert(arg3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg3);
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

#ifndef PRODUCT
#undef  __
#define __ (Verbose ? (sasm->block_comment(FILE_AND_LINE),sasm):sasm)->
#endif // !PRODUCT

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

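// Helpers that wrap RegisterSaver. Each one records the resulting frame size
// (in words) with the StubAssembler so that subsequent code, such as the
// stack-argument loads in the stub cases below and the exception paths, can
// address the caller's stack relative to Z_SP.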
static OopMap* generate_oop_map(StubAssembler* sasm) {
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::generate_oop_map(sasm, reg_set);
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true, Register return_pc = Z_R14) {
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set =
    save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
  if (!save_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("save_live_registers_except_r2");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
  int frame_size_in_slots =
      RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set);
}

static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
  __ block_comment("save_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  RegisterSaver::RegisterSet reg_set =
    restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (!restore_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("restore_live_registers_except_r2");
  RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}

static void restore_volatile_registers(StubAssembler* sasm) {
  __ block_comment("restore_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

void Runtime1::initialize_pd() {
  // Nothing to do.
}

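// Emit a stub that saves all live registers, calls a throwing runtime entry
// (with up to two arguments passed in Z_R1_scratch/Z_R0_scratch), and records
// an oop map at the call site. The runtime entry is expected to throw, so
// control never returns to the stub.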
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // Incoming parameters: Z_EXC_OOP and Z_EXC_PC.
  // Keep copies in callee-saved registers during runtime call.
  const Register exception_oop_callee_saved = Z_R11;
  const Register exception_pc_callee_saved = Z_R12;
  // Other registers used in this stub.
  const Register handler_addr = Z_R4;

  // Verify that only exception_oop and exception_pc are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);

  // Check that the fields in JavaThread for exception oop and issuing pc are empty.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception_oop and pc in callee-saved registers to preserve them
  // during the runtime call.
  __ verify_not_null_oop(Z_EXC_OOP);
  __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP);
  __ lgr_if_needed(exception_pc_callee_saved, Z_EXC_PC);

  __ push_frame_abi160(0); // Runtime code needs the z_abi_160.

  // Search the exception handler address of the caller (using the return address).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Z_thread, Z_EXC_PC);
  // Z_RET(Z_R2): exception handler address of the caller.

  __ pop_frame();

  __ invalidate_registers(exception_oop_callee_saved, exception_pc_callee_saved, Z_RET);

  // Move result of call into correct register.
  __ lgr_if_needed(handler_addr, Z_RET);

  // Restore exception oop and pc to Z_EXC_OOP and Z_EXC_PC (required convention of exception handler).
  __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);
  __ lgr_if_needed(Z_EXC_PC, exception_pc_callee_saved);

  // Verify that there is really a valid exception in Z_EXC_OOP.
  __ verify_not_null_oop(Z_EXC_OOP);

  __ z_br(handler_addr); // Jump to exception handler.
}

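// Common code for the *_patching stubs below: save all registers, call the
// given patching routine in the VM, then either re-execute the patched
// instruction (zero return value) or enter the deoptimization blob because
// the nmethod was deoptimized while we were in the VM.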
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; it returns non-zero if the nmethod got deoptimized.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // Re-execute the patched instruction or, if the nmethod was
  // deoptimized, return to the deoptimization handler entry that will
  // cause re-execution of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  __ z_ltr(Z_RET, Z_RET); // Set CC on the return value; zero means the nmethod was not deoptimized.

  restore_live_registers(sasm);

  __ z_bcr(Assembler::bcondZero, Z_R14);

  // Return to the deoptimization handler entry for unpacking and
  // re-execution. If we simply returned, we would deopt as if any call
  // we patched had just returned.
  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ load_const_optimized(Z_R1_scratch, dest);
  __ z_br(Z_R1_scratch);

  return oop_maps;
}

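// Emit the code for the C1 runtime stub identified by id. Most cases follow
// the same pattern: set_info(), save the live registers, call_RT() into the
// VM, record an oop map at the returned call offset, restore the registers,
// and return via Z_R14.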
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Default value; overwritten for some optimized stubs that are
  // called from methods that do not use the fpu.
  bool save_fpu_registers = true;

  // Stub code and info for the different stubs.
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // will not return
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass    = Z_R11; // Incoming
        Register obj      = Z_R2;  // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          // SAP JVM: must call RT to generate allocation events.
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case counter_overflow_id:
      {
        // Arguments :
        //   bci    : stack param 0
        //   method : stack param 1
        //
        Register bci = Z_ARG2, method = Z_ARG3;
        // frame size in bytes
        OopMap* map = save_live_registers(sasm);
        const int frame_size = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
        __ z_lg(bci,    0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(method, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ z_br(Z_R14);
      }
      break;
    case new_type_array_id:
    case new_object_array_id:
      {
        Register length   = Z_R13; // Incoming
        Register klass    = Z_R11; // Incoming
        Register obj      = Z_R2;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          NearLabel ok;
          Register t0 = obj;
          __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
          __ z_sra(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          // SAP JVM: must call RT to generate allocation events.
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3: klass
        // Z_R4: rank
        // Z_R5: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(Z_R2, noreg, CAST_FROM_FN_PTR(address, new_multi_array), Z_R3, Z_R4, Z_R5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        // Z_R2: new multi array
        __ verify_oop(Z_R2);
        __ z_br(Z_R14);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Load the klass and check its has-finalizer flag.
        Register klass = Z_ARG2;
        __ load_klass(klass, Z_ARG1);
        __ testbit(Address(klass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
        __ z_bcr(Assembler::bcondAllZero, Z_R14); // Return if bit is not set.

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), Z_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers.
        restore_live_registers(sasm);

        __ z_br(Z_R14);
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;
    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;
    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stubframe since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;
    case throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;
    case throw_class_cast_exception_id:
    { // Z_R1_scratch: object
      __ set_info("throw_class_cast_exception", dont_gc_arguments);
      oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
    }
    break;
    case throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
    case slow_subtype_check_id:
    {
      // Arguments :
      //   sub  : stack param 0
      //   super: stack param 1
      //   raddr: Z_R14, blown by call
      //
      // Result : condition code 0 for match (bcondEqual will be true),
      //          condition code 2 for miss  (bcondNotEqual will be true)
      NearLabel miss;
      const Register Rsubklass   = Z_ARG2; // sub
      const Register Rsuperklass = Z_ARG3; // super

      // No args, but tmp registers that are killed.
      const Register Rlength     = Z_ARG4; // cache array length
      const Register Rarray_ptr  = Z_ARG5; // Current value from cache array.

      if (UseCompressedOops) {
        assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
      }

      const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
      // Save return pc. This is not necessary, but could be helpful
      // in the case of crashes.
      __ save_return_pc();
      __ push_frame(frame_size);
      // Save registers before changing them.
      int i = 0;
      __ z_stg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_stg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_stg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_stg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");

      // Get sub and super from stack.
      __ z_lg(Rsubklass,   0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
      __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);

      __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, NULL, &miss);

      // Match falls through here.
      i = 0;
      __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
      __ pop_frame();
      // Return pc is still in Z_R14.
      __ clear_reg(Z_R0_scratch);         // Zero indicates a match. Set CC 0 (bcondEqual will be true)
      __ z_br(Z_R14);

      __ BIND(miss);
      i = 0;
      __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
      assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
      __ pop_frame();
      // Return pc is still in Z_R14.
      __ load_const_optimized(Z_R0_scratch, 1); // One indicates a miss.
      __ z_ltgr(Z_R0_scratch, Z_R0_scratch);    // Set CC 2 (bcondNotEqual will be true).
      __ z_br(Z_R14);
    }
    break;
    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // Z_R1_scratch : object
        // Z_R13       : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), Z_R1_scratch, Z_R13);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // Z_R1_scratch : lock address
        // Note: really a leaf routine but we must set up the last Java sp
        //   => use call_RT for now (speed can be improved by
        //      doing the last Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), Z_R1_scratch);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ load_const_optimized(Z_R1_scratch, dest);
        __ z_br(Z_R1_scratch);
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // The register/oop map is set up by generate_patching().
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
#if 0
    case dtrace_object_alloc_id:
      { // rax,: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax, and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi,ecx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit
        __ movptr(rax, result_low_word);
        // testing of high bits
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
        // return max_jlong
        __ mov64(rax, CONST64(0x7fffffffffffffff));
        __ jmp(do_return);

        __ bind(return_min_jlong);
        __ mov64(rax, UCONST64(0x8000000000000000));
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
        __ xorptr(rax, rax);

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#endif // TODO

#if INCLUDE_ALL_GCS
    case g1_pre_barrier_slow_id:
    case g1_pre_barrier_slow_with_recheck_id:
      { // Z_R1_scratch: previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ should_not_reach_here(FILE_AND_LINE);
          break;
        }

        if (id == g1_pre_barrier_slow_id) {
          __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
        } else {
          __ set_info("g1_pre_barrier_slow_with_recheck_id", dont_gc_arguments);
        }

        Register pre_val = Z_R1_scratch;
        Register tmp  = Z_R6; // Must be non-volatile because it is used to save pre_val.
        Register tmp2 = Z_R7;

        Label refill, restart, marking_not_active;
        int satb_q_active_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_active());
        int satb_q_index_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_index());
        int satb_q_buf_byte_offset =
          in_bytes(JavaThread::satb_mark_queue_offset() +
                   SATBMarkQueue::byte_offset_of_buf());

        // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
        __ z_stg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

        if (id == g1_pre_barrier_slow_with_recheck_id) {
          // Is marking still active?
          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
            __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
          } else {
            guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
            __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
          }
          __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.
        }

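        // Enqueue pre_val into the thread-local SATB buffer: decrement the
        // index, store pre_val at [_buf + index], and if the buffer is full
        // (index == 0), call into the VM to process it, then retry.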
        __ bind(restart);
        // Load the index into the SATB buffer. SATBMarkQueue::_index is a
        // size_t, so a 64-bit load (z_ltg) is appropriate.
        __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);

        // index == 0?
        __ z_brz(refill);

        __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
        __ add2reg(tmp, -oopSize);

        __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <previous value>
        __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);

        __ bind(marking_not_active);
        // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
        __ z_lg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_br(Z_R14);

        __ bind(refill);
        save_volatile_registers(sasm);
        __ z_lgr(tmp, pre_val); // save pre_val
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
                        Z_thread);
        __ z_lgr(pre_val, tmp); // restore pre_val
        restore_volatile_registers(sasm);
        __ z_bru(restart);
      }
      break;

    case g1_post_barrier_slow_id:
      { // Z_R1_scratch: oop address, address of updated memory slot
        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ should_not_reach_here(FILE_AND_LINE);
          break;
        }

        __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);

        Register addr_oop  = Z_R1_scratch;
        Register addr_card = Z_R1_scratch;
        Register r1        = Z_R6; // Must be saved/restored.
        Register r2        = Z_R7; // Must be saved/restored.
        Register cardtable = r1;   // Must be non-volatile, because it is used to save addr_card.
        jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;

        // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
        __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

        Label not_already_dirty, restart, refill, young_card;

        // Calculate address of card corresponding to the updated oop slot.
        AddressLiteral rs(byte_map_base);
        __ z_srlg(addr_card, addr_oop, CardTableModRefBS::card_shift);
        addr_oop = noreg; // dead now
        __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
        __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable

        __ z_cli(0, addr_card, (int)G1SATBCardTableModRefBS::g1_young_card_val());
        __ z_bre(young_card);

        __ z_sync(); // Required to support concurrent cleaning.

        __ z_cli(0, addr_card, (int)CardTableModRefBS::dirty_card_val());
        __ z_brne(not_already_dirty);

        __ bind(young_card);
        // We didn't take the branch, so we're already dirty: restore
        // used registers and return.
        __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_br(Z_R14);

        // Not dirty.
        __ bind(not_already_dirty);

        // First, dirty it: [addr_card] := 0
        __ z_mvi(0, addr_card, CardTableModRefBS::dirty_card_val());

        Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
        Register buf = r2;
        cardtable = noreg; // now dead

        // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
        __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

        ByteSize dirty_card_q_index_byte_offset =
          JavaThread::dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_index();
        ByteSize dirty_card_q_buf_byte_offset =
          JavaThread::dirty_card_queue_offset() + DirtyCardQueue::byte_offset_of_buf();

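        // Enqueue the card address into the thread-local dirty card queue,
        // mirroring the SATB enqueue above: decrement the index, store the
        // card address at [_buf + index], and call into the VM to process
        // the buffer when it is full (index == 0).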
        __ bind(restart);

        // Get the index into the update buffer. DirtyCardQueue::_index is
        // a size_t so z_ltg is appropriate here.
        __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));

        // index == 0?
        __ z_brz(refill);

        __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
        __ add2reg(idx, -oopSize);

        __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
        __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
        // Restore killed registers and return.
        __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
        __ z_br(Z_R14);

        __ bind(refill);
        save_volatile_registers(sasm);
        __ z_lgr(idx, addr_card); // Save addr_card; idx must be non-volatile.
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
                                         Z_thread);
        __ z_lgr(addr_card, idx); // Restore addr_card.
        restore_volatile_registers(sasm);
        __ z_bru(restart);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
        __ z_br(Z_R1_scratch);
      }
      break;

    default:
      {
        __ should_not_reach_here(FILE_AND_LINE, id);
      }
      break;
  }
  return oop_maps;
}

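// Emit the exception handling entry for the given StubID. Depending on the
// id, the caller's registers are either already saved, saved here, or only
// a minimal abi160 frame is pushed. The exception oop and issuing pc are
// stored into the JavaThread, exception_handler_for_pc is called to look up
// the handler, and control is dispatched to it after restoring saved state.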
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters: Z_EXC_OOP, Z_EXC_PC

  // Save registers if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  Register reg_fp = Z_R1_scratch;

  switch (id) {
    case forward_exception_id: {
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm);

      // Load the pending exception oop into Z_EXC_OOP and clear the field.
      __ z_lg(Z_EXC_OOP, Address(Z_thread, Thread::pending_exception_offset()));
      __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), 8);

      // Different stubs forward their exceptions; they should all have similar frame layouts
      // (a) to find their return address (b) for a correct oop_map generated above.
      assert(RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers) ==
             RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers_except_r2), "requirement");

      // Load issuing PC (the return address for this stub).
      const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
      __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_abi16(return_pc)));
      DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));)

      // Make sure that the vm_results are cleared (may be unnecessary).
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_offset()),   sizeof(oop));
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(Metadata*));
      break;
    }
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Z_EXC_PC);
      break;
    case handle_exception_from_callee_id: {
      // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      __ save_return_pc(Z_EXC_PC);
      const int frame_size_in_bytes = __ push_frame_abi160(0);
      oop_map = new OopMap(frame_size_in_bytes / VMRegImpl::stack_slot_size, 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      break;
    }
    default:  ShouldNotReachHere();
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC, reg_fp);
  // Verify that Z_EXC_OOP contains a valid exception.
  __ verify_not_null_oop(Z_EXC_OOP);

  // Check that fields in JavaThread for exception oop and issuing pc
  // are empty before writing to them.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and issuing pc into JavaThread.
  // (Exception handler will load it from here.)
  __ z_stg(Z_EXC_OOP, Address(Z_thread, JavaThread::exception_oop_offset()));
  __ z_stg(Z_EXC_PC, Address(Z_thread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  { NearLabel ok;
    __ z_cg(Z_EXC_PC, Address(reg_fp, _z_abi16(return_pc)));
    __ branch_optimized(Assembler::bcondEqual, ok);
    __ stop("use throwing pc as return address (has bci & oop map)");
    __ bind(ok);
  }
#endif

  // Compute the exception handler.
  // The exception oop and the throwing pc are read from the fields in JavaThread.
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Z_RET(Z_R2): handler address
  //   will be the deopt blob if the nmethod was deoptimized while we looked
  //   up the handler, regardless of whether a handler existed in the nmethod.

  // Only Z_R2 is valid at this time; all other registers have been destroyed by the runtime call.
  __ invalidate_registers(Z_R2);

  switch(id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // Restore the registers that were saved at the beginning.
      __ z_lgr(Z_R1_scratch, Z_R2);   // Restoring live registers kills Z_R2.
      restore_live_registers(sasm, id != handle_exception_nofpu_id);  // Also pops the frame.
      __ z_br(Z_R1_scratch);
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ z_br(Z_R2); // Jump to exception handler.
    }
    break;
    default:  ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}