/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame()
  // save it into the frame anchor.
  address pc = get_PC(Z_R1_scratch);
  int call_offset = (int)(pc - addr_at(0));
  set_last_Java_frame(Z_SP, Z_R1_scratch);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = NULL;
  align_call_far_patchable(this->pc());
  return_pc = call_c_opt(entry_point);
  assert(return_pc != NULL, "const section overflow");

  reset_last_Java_frame();

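  // A note on the check that follows: the VM entry just called does not
  // throw into compiled code directly; it records any exception in the
  // current thread's pending_exception field instead. The shape of the
  // check, in illustrative pseudo-C only (the real work is done by the
  // assembly below):
  //
  //   if (thread->pending_exception() != NULL) {
  //     clear the vm_result slots;
  //     branch to the forward_exception stub;
  //   }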
  // Check for pending exceptions.
  {
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to jump to forward_exception conditionally. However, after
    // relocation the conditional branch might no longer reach its target,
    // so we branch around an unconditional jump, which always reaches.

    Label ok;
    z_bre(ok); // bcondEqual is the same as bcondZero.

    // Exception pending => forward to exception handler.

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(jlong));
    }
    if (metadata_result->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(jlong));
    }
    if (frame_size() == no_frame_size) {
      // Pop the stub frame.
      pop_frame();
      restore_return_pc();
      load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
      z_br(Z_R1);
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      load_const_optimized(Z_R1, Runtime1::entry_for (Runtime1::forward_exception_id));
      z_br(Z_R1);
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  assert(arg3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg3);
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

#ifndef PRODUCT
#undef  __
#define __ (Verbose ? (sasm->block_comment(FILE_AND_LINE), sasm) : sasm)->
#endif // !PRODUCT

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

static OopMap* generate_oop_map(StubAssembler* sasm) {
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::generate_oop_map(sasm, reg_set);
}

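// Frame-size bookkeeping used by the helpers above and below:
// RegisterSaver::live_reg_frame_size() reports bytes; dividing by
// VMRegImpl::stack_slot_size yields stack slots, and dividing again by
// VMRegImpl::slots_per_word yields words, which is what
// StubAssembler::set_frame_size() expects. With illustrative numbers only
// (assuming 4-byte slots and 2 slots per 8-byte word), a 368-byte save
// area is 92 slots, i.e. a frame size of 46 words.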
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true, Register return_pc = Z_R14) {
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set =
    save_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
  if (!save_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("save_live_registers_except_r2");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  RegisterSaver::RegisterSet reg_set =
    restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (!restore_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("restore_live_registers_except_r2");
  RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}

void Runtime1::initialize_pd() {
  // Nothing to do.
}

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // Incoming parameters: Z_EXC_OOP and Z_EXC_PC.
  // Keep copies in callee-saved registers during the runtime call.
  const Register exception_oop_callee_saved = Z_R11;
  const Register exception_pc_callee_saved  = Z_R12;
  // Other registers used in this stub.
  const Register handler_addr = Z_R4;

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);

  // Check that the fields in JavaThread for exception oop and issuing pc are empty.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save the exception oop and issuing pc in callee-saved registers to
  // preserve them across the runtime call.
  __ verify_not_null_oop(Z_EXC_OOP);
  __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP);
  __ lgr_if_needed(exception_pc_callee_saved, Z_EXC_PC);

  __ push_frame_abi160(0); // Runtime code needs the z_abi_160.

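  // The leaf call below may clobber the volatile argument and return
  // registers, which is why Z_EXC_OOP and Z_EXC_PC were copied into the
  // callee-saved Z_R11/Z_R12 above; they are moved back once the handler
  // address has been obtained.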
  // Find the exception handler address of the caller (using the return address).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Z_thread, Z_EXC_PC);
  // Z_RET (Z_R2): exception handler address of the caller.

  __ pop_frame();

  __ invalidate_registers(exception_oop_callee_saved, exception_pc_callee_saved, Z_RET);

  // Move the result of the call into the correct register.
  __ lgr_if_needed(handler_addr, Z_RET);

  // Restore the exception oop and pc to Z_EXC_OOP and Z_EXC_PC (required convention of the exception handler).
  __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);
  __ lgr_if_needed(Z_EXC_PC, exception_pc_callee_saved);

  // Verify that there is really a valid exception in Z_EXC_OOP.
  __ verify_not_null_oop(Z_EXC_OOP);

  __ z_br(handler_addr); // Jump to exception handler.
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine; it returns non-zero if the nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // Re-execute the patched instruction or, if the nmethod was
  // deoptimized, return to the deoptimization handler entry that will
  // cause re-execution of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  __ z_ltr(Z_RET, Z_RET); // return value == 0

  restore_live_registers(sasm);

  __ z_bcr(Assembler::bcondZero, Z_R14);

  // Return to the deoptimization handler entry for unpacking and
  // re-execution. If we simply returned, we would deopt as if any call we
  // patched had just returned.
  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ load_const_optimized(Z_R1_scratch, dest);
  __ z_br(Z_R1_scratch);

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // For better readability.
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Default value; overwritten for some optimized stubs that are
  // called from methods that do not use the fpu.
  bool save_fpu_registers = true;

  // Stub code and info for the different stubs.
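  // Each case below emits one self-contained stub. The common pattern is:
  // save the live registers, call into the VM via call_RT, record an oop
  // map at the returned call offset, restore the registers, and return
  // through Z_R14. Stubs that cannot return (the exception throwers) end
  // in should_not_reach_here() via generate_exception_throw() instead.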
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // Will not return.
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = Z_R11; // Incoming
        Register obj   = Z_R2;  // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case counter_overflow_id:
      {
        // Arguments:
        //   bci    : stack param 0
        //   method : stack param 1
        Register bci = Z_ARG2, method = Z_ARG3;
        OopMap* map = save_live_registers(sasm);
        // Frame size in bytes.
        const int frame_size = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
        __ z_lg(bci,    0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(method, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ z_br(Z_R14);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = Z_R13; // Incoming
        Register klass  = Z_R11; // Incoming
        Register obj    = Z_R2;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

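        // Background for the debug-only check below: Klass::layout_helper()
        // encodes the array tag in its topmost bits, so shifting right by
        // _lh_array_tag_shift isolates the tag for comparison. A rough
        // sketch of the same check in C++ (illustrative only, not the code
        // actually emitted):
        //
        //   jint lh  = klass->layout_helper();
        //   jint tag = lh >> Klass::_lh_array_tag_shift;
        //   assert(tag == expected_tag, "not an array klass of the expected kind");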
#ifdef ASSERT
        // Assert that the object type really is an array of the proper kind.
        {
          NearLabel ok;
          Register t0 = obj;
          __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
          __ z_sra(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3: klass
        // Z_R4: rank
        // Z_R5: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(Z_R2, noreg, CAST_FROM_FN_PTR(address, new_multi_array), Z_R3, Z_R4, Z_R5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        // Z_R2: new multi array
        __ verify_oop(Z_R2);
        __ z_br(Z_R14);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Load the klass and check the has-finalizer flag.
        Register klass = Z_ARG2;
        __ load_klass(klass, Z_ARG1);
        __ testbit(Address(klass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
        __ z_bcr(Assembler::bcondAllZero, Z_R14); // Return if bit is not set.

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), Z_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

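        // The stub returns straight back into compiled code, which expects
        // all of its registers to be intact, so nothing may stay clobbered
        // past this point.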
        // Now restore all the live registers.
        restore_live_registers(sasm);

        __ z_br(Z_R14);
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stubframe since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { // Z_R1_scratch: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Arguments:
        //   sub  : stack param 0
        //   super: stack param 1
        //   raddr: Z_R14, blown by call
        //
        // Result: condition code 0 for match (bcondEqual will be true),
        //         condition code 2 for miss  (bcondNotEqual will be true)
        NearLabel miss;
        const Register Rsubklass   = Z_ARG2; // sub
        const Register Rsuperklass = Z_ARG3; // super

        // No args, but tmp registers that are killed.
        const Register Rlength    = Z_ARG4; // cache array length
        const Register Rarray_ptr = Z_ARG5; // Current value from cache array.

        if (UseCompressedOops) {
          assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
        }

        const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
        // Save the return pc. This is not necessary, but could be helpful
        // in case of crashes.
        __ save_return_pc();
        __ push_frame(frame_size);
        // Save registers before changing them.
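        // The four temporaries occupy consecutive 8-byte slots right above
        // the z_abi_160 save area; the matching z_lg reloads below use the
        // same offsets on both the match path and the miss path.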
        int i = 0;
        __ z_stg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");

        // Get sub and super from stack.
        __ z_lg(Rsubklass,   0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);

        __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, NULL, &miss);

        // Match falls through here.
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // The return pc is still in Z_R14.
        __ clear_reg(Z_R0_scratch); // Zero indicates a match. Sets CC 0 (bcondEqual will be true).
        __ z_br(Z_R14);

        __ BIND(miss);
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // The return pc is still in Z_R14.
        __ load_const_optimized(Z_R0_scratch, 1); // One indicates a miss.
        __ z_ltgr(Z_R0_scratch, Z_R0_scratch);    // Sets CC 2 (bcondNotEqual will be true).
        __ z_br(Z_R14);
      }
      break;

    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // Z_R1_scratch : object
        // Z_R13        : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), Z_R1_scratch, Z_R13);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

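    // The *_nofpu_id variants of monitorenter/monitorexit are used when the
    // calling method does not use the FPU; they skip saving and restoring
    // the floating point registers, which keeps the stub frame smaller and
    // the call cheaper (see save_fpu_registers above).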
    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // Z_R1_scratch : lock address
        // Note: really a leaf routine, but it must set up the last Java sp
        // => use call_RT for now (speed can be improved by
        // doing the last Java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), Z_R1_scratch);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ load_const_optimized(Z_R1_scratch, dest);
        __ z_br(Z_R1_scratch);
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // We should set up the register map.
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;

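    // The four *_patching stubs above all share generate_patching(); they
    // differ only in the VM entry point that performs the actual patching
    // of the compiled code.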
#if 0
    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here, so skip the oopmap, but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there.
        // Preserve rsi and rcx.
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // Check for NaN.
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32); // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit.
        __ movptr(rax, result_low_word);
        // Testing of high bits.
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ testl(rax, 0x4100); // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
        // return max_jlong
        __ mov64(rax, CONST64(0x7fffffffffffffff));
        __ jmp(do_return);

        __ bind(return_min_jlong);
        __ mov64(rax, UCONST64(0x8000000000000000));
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
        __ xorptr(rax, rax);

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#endif // TODO

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
        __ z_br(Z_R1_scratch);
      }
      break;

    default:
      {
        __ should_not_reach_here(FILE_AND_LINE, id);
      }
      break;
  }
  return oop_maps;
}

OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // Incoming parameters: Z_EXC_OOP, Z_EXC_PC.

  // Save registers if required.
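  // Three kinds of callers reach this point: forward_exception_id arrives
  // with all registers already saved by the forwarding stub,
  // handle_exception[_nofpu]_id must save them here, and
  // handle_exception_from_callee_id only has Z_EXC_OOP/Z_EXC_PC live.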
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  Register reg_fp = Z_R1_scratch;

  switch (id) {
    case forward_exception_id: {
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm);

      // Load and clear the pending exception oop (into Z_EXC_OOP).
      __ z_lg(Z_EXC_OOP, Address(Z_thread, Thread::pending_exception_offset()));
      __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), 8);

      // Different stubs forward their exceptions; they should all have similar frame layouts
      // (a) to find their return address, (b) for a correct oop_map generated above.
      assert(RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers) ==
             RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers_except_r2), "requirement");

      // Load the issuing PC (the return address for this stub).
      const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
      __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_abi16(return_pc)));
      DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));)

      // Make sure that the vm_results are cleared (may be unnecessary).
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(oop));
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(Metadata*));
      break;
    }
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Z_EXC_PC);
      break;
    case handle_exception_from_callee_id: {
      // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      __ save_return_pc(Z_EXC_PC);
      const int frame_size_in_bytes = __ push_frame_abi160(0);
      oop_map = new OopMap(frame_size_in_bytes / VMRegImpl::stack_slot_size, 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      break;
    }
    default: ShouldNotReachHere();
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC, reg_fp);
  // Verify that Z_EXC_OOP contains a valid exception.
  __ verify_not_null_oop(Z_EXC_OOP);

  // Check that the fields in JavaThread for exception oop and issuing pc
  // are empty before writing to them.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save the exception oop and issuing pc into JavaThread.
  // (The exception handler will load them from there.)
  __ z_stg(Z_EXC_OOP, Address(Z_thread, JavaThread::exception_oop_offset()));
  __ z_stg(Z_EXC_PC, Address(Z_thread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  { NearLabel ok;
    __ z_cg(Z_EXC_PC, Address(reg_fp, _z_abi16(return_pc)));
    __ branch_optimized(Assembler::bcondEqual, ok);
    __ stop("use throwing pc as return address (has bci & oop map)");
    __ bind(ok);
  }
#endif

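  // Note on the assertion above: the stub's return address slot deliberately
  // holds the throwing pc rather than a real return address, because the oop
  // maps and bci information of the enclosing nmethod are keyed to that pc.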
  // Compute the exception handler.
  // The exception oop and the throwing pc are read from the fields in JavaThread.
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Z_RET (Z_R2): handler address.
  // It will be the deopt blob if the nmethod was deoptimized while we looked
  // up the handler, regardless of whether a handler existed in the nmethod.

  // Only Z_R2 is valid at this time; all other registers have been destroyed by the runtime call.
  __ invalidate_registers(Z_R2);

  switch (id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // Restore the registers that were saved at the beginning.
      __ z_lgr(Z_R1_scratch, Z_R2);                                  // Restoring live registers kills Z_R2.
      restore_live_registers(sasm, id != handle_exception_nofpu_id); // Pops the frame as well.
      __ z_br(Z_R1_scratch);
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ z_br(Z_R2); // Jump to exception handler.
    }
    break;
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}