/*
 * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry_point, int number_of_arguments) {
  set_num_rt_args(0); // Nothing on stack.
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");

  // We cannot trust that code generated by the C++ compiler saves R14
  // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
  // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
  // Therefore we load the PC into Z_R1_scratch and let set_last_Java_frame() save
  // it into the frame anchor.
  address pc = get_PC(Z_R1_scratch);
  int call_offset = (int)(pc - addr_at(0));
  set_last_Java_frame(Z_SP, Z_R1_scratch);

  // ARG1 must hold thread address.
  z_lgr(Z_ARG1, Z_thread);

  address return_pc = NULL;
  align_call_far_patchable(this->pc());
  return_pc = call_c_opt(entry_point);
  assert(return_pc != NULL, "const section overflow");

  reset_last_Java_frame();

  // Check for pending exceptions.
  {
    load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));

    // This used to conditionally jump to forward_exception. However, after
    // relocation the branch might not reach, so we always jump around it instead.

    Label ok;
    z_bre(ok); // Bcondequal is the same as bcondZero.

    // Exception pending => forward to exception handler.

    // Make sure that the vm_results are cleared.
    if (oop_result1->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(jlong));
    }
    if (metadata_result->is_valid()) {
      clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(jlong));
    }
    if (frame_size() == no_frame_size) {
      // Pop the stub frame.
      pop_frame();
      restore_return_pc();
      load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
      z_br(Z_R1);
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      load_const_optimized(Z_R1, Runtime1::entry_for (Runtime1::forward_exception_id));
      z_br(Z_R1);
    }

    bind(ok);
  }

  // Get oop results if there are any and reset the values in the thread.
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result);
  }

  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  return call_RT(oop_result1, metadata_result, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
  // Z_ARG1 is reserved for the thread.
  lgr_if_needed(Z_ARG2, arg1);
  assert(arg2 != Z_ARG2, "smashed argument");
  lgr_if_needed(Z_ARG3, arg2);
  assert(arg3 != Z_ARG3, "smashed argument");
  lgr_if_needed(Z_ARG4, arg3);
  return call_RT(oop_result1, metadata_result, entry, 3);
}


// Implementation of Runtime1

#define __ sasm->

#ifndef PRODUCT
#undef __
#define __ (Verbose ? (sasm->block_comment(FILE_AND_LINE),sasm):sasm)->
#endif // !PRODUCT

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

static OopMap* generate_oop_map(StubAssembler* sasm) {
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::generate_oop_map(sasm, reg_set);
}

static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true, Register return_pc = Z_R14) {
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set =
    save_fpu_registers ?
      RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static OopMap* save_live_registers_except_r2(StubAssembler* sasm, bool save_fpu_registers = true) {
  if (!save_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("save_live_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_registers_except_r2;
  int frame_size_in_slots =
    RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  RegisterSaver::RegisterSet reg_set =
    restore_fpu_registers ? RegisterSaver::all_registers : RegisterSaver::all_integer_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

static void restore_live_registers_except_r2(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (!restore_fpu_registers) {
    __ unimplemented(FILE_AND_LINE);
  }
  __ block_comment("restore_live_registers_except_r2");
  RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}

void Runtime1::initialize_pd() {
  // Nothing to do.
}

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);
  int call_offset;
  if (!has_argument) {
    call_offset = __ call_RT(noreg, noreg, target);
  } else {
    call_offset = __ call_RT(noreg, noreg, target, Z_R1_scratch, Z_R0_scratch);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ should_not_reach_here();
  return oop_maps;
}

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // Incoming parameters: Z_EXC_OOP and Z_EXC_PC.
  // Keep copies in callee-saved registers during runtime call.
  const Register exception_oop_callee_saved = Z_R11;
  const Register exception_pc_callee_saved  = Z_R12;
  // Other registers used in this stub.
  const Register handler_addr = Z_R4;

  // Verify that only exception_oop is valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC);

  // Check that fields in JavaThread for exception oop and issuing pc are set.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and pc in callee-saved registers to preserve them
  // during the runtime call.
  __ verify_not_null_oop(Z_EXC_OOP);
  __ lgr_if_needed(exception_oop_callee_saved, Z_EXC_OOP);
  __ lgr_if_needed(exception_pc_callee_saved, Z_EXC_PC);

  __ push_frame_abi160(0); // Runtime code needs the z_abi_160.

  // Search the exception handler address of the caller (using the return address).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), Z_thread, Z_EXC_PC);
  // Z_RET(Z_R2): exception handler address of the caller.

  __ pop_frame();

  __ invalidate_registers(exception_oop_callee_saved, exception_pc_callee_saved, Z_RET);

  // Move result of call into correct register.
  __ lgr_if_needed(handler_addr, Z_RET);

  // Restore exception oop and pc to Z_EXC_OOP and Z_EXC_PC (required convention of exception handler).
  __ lgr_if_needed(Z_EXC_OOP, exception_oop_callee_saved);
  __ lgr_if_needed(Z_EXC_PC, exception_pc_callee_saved);

  // Verify that there is really a valid exception in Z_EXC_OOP.
  __ verify_not_null_oop(Z_EXC_OOP);

  __ z_br(handler_addr); // Jump to exception handler.
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // Make a frame and preserve the caller's caller-save registers.
  OopMap* oop_map = save_live_registers(sasm);

  // Call the runtime patching routine, returns non-zero if nmethod got deopted.
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // Re-execute the patched instruction or, if the nmethod was
  // deoptimized, return to the deoptimization handler entry that will
  // cause re-execution of the current bytecode.
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  __ z_ltr(Z_RET, Z_RET); // return value == 0

  restore_live_registers(sasm);

  __ z_bcr(Assembler::bcondZero, Z_R14);

  // Return to the deoptimization handler entry for unpacking and re-execution.
  // If we simply returned, we would deopt as if any call we patched had just returned.
  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ load_const_optimized(Z_R1_scratch, dest);
  __ z_br(Z_R1_scratch);

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // Default value; overwritten for some optimized stubs that are
  // called from methods that do not use the fpu.
  bool save_fpu_registers = true;

  // Stub code and info for the different stubs.
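  // Most of the cases below follow the same pattern: save the live registers,
  // call into the runtime via call_RT (which returns the offset of the call),
  // register the oop map at that offset, restore the registers, and return to
  // the caller via Z_R14.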
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        // will not return
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = Z_R11; // Incoming
        Register obj   = Z_R2;  // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case counter_overflow_id:
      {
        // Arguments :
        //   bci    : stack param 0
        //   method : stack param 1
        //
        Register bci = Z_ARG2, method = Z_ARG3;
        // frame size in bytes
        OopMap* map = save_live_registers(sasm);
        const int frame_size = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
        __ z_lg(bci,    0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(method, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ z_br(Z_R14);
      }
      break;
    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = Z_R13; // Incoming
        Register klass  = Z_R11; // Incoming
        Register obj    = Z_R2;  // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // Assert object type is really an array of the proper kind.
        {
          NearLabel ok;
          Register t0 = obj;
          __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
          __ z_sra(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ?
                      Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        OopMap* map = save_live_registers_except_r2(sasm);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        __ verify_oop(obj);
        __ z_br(Z_R14);
      }
      break;

    case new_multi_array_id:
      { __ set_info("new_multi_array", dont_gc_arguments);
        // Z_R3: klass
        // Z_R4: rank
        // Z_R5: address of 1st dimension
        OopMap* map = save_live_registers(sasm);
        int call_offset = __ call_RT(Z_R2, noreg, CAST_FROM_FN_PTR(address, new_multi_array), Z_R3, Z_R4, Z_R5);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_r2(sasm);

        // Z_R2: new multi array
        __ verify_oop(Z_R2);
        __ z_br(Z_R14);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // Load the klass and check the has-finalizer flag.
        Register klass = Z_ARG2;
        __ load_klass(klass, Z_ARG1);
        __ testbit(Address(klass, Klass::access_flags_offset()), exact_log2(JVM_ACC_HAS_FINALIZER));
        __ z_bcr(Assembler::bcondAllZero, Z_R14); // Return if bit is not set.

        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), Z_ARG1);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers.
        restore_live_registers(sasm);

        __ z_br(Z_R14);
      }
      break;

    case throw_range_check_failed_id:
      { __ set_info("range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { __ set_info("index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;
    case throw_div0_exception_id:
      { __ set_info("throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;
    case throw_null_pointer_exception_id:
      { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;
    case handle_exception_nofpu_id:
    case handle_exception_id:
      { __ set_info("handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case handle_exception_from_callee_id:
      { __ set_info("handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // Note: no stubframe since we are about to leave the current
        // activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;
    case throw_array_store_exception_id:
      { __ set_info("throw_array_store_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;
    case throw_class_cast_exception_id:
      { // Z_R1_scratch: object
        __ set_info("throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;
    case throw_incompatible_class_change_error_id:
      { __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
    case slow_subtype_check_id:
      {
        // Arguments :
        //   sub  : stack param 0
        //   super: stack param 1
        //   raddr: Z_R14, blown by call
        //
        // Result : condition code 0 for match (bcondEqual will be true),
        //          condition code 2 for miss  (bcondNotEqual will be true)
        NearLabel miss;
        const Register Rsubklass   = Z_ARG2; // sub
        const Register Rsuperklass = Z_ARG3; // super

        // No args, but tmp registers that are killed.
        const Register Rlength    = Z_ARG4; // cache array length
        const Register Rarray_ptr = Z_ARG5; // Current value from cache array.

        if (UseCompressedOops) {
          assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
        }

        const int frame_size = 4*BytesPerWord + frame::z_abi_160_size;
        // Save return pc. This is not necessary, but could be helpful
        // in the case of crashes.
        __ save_return_pc();
        __ push_frame(frame_size);
        // Save registers before changing them.
        int i = 0;
        __ z_stg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_stg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");

        // Get sub and super from stack.
        __ z_lg(Rsubklass,   0*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);
        __ z_lg(Rsuperklass, 1*BytesPerWord + FrameMap::first_available_sp_in_frame + frame_size, Z_SP);

        __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass, Rarray_ptr, Rlength, NULL, &miss);

        // Match falls through here.
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ clear_reg(Z_R0_scratch); // Zero indicates a match. Set CC 0 (bcondEqual will be true).
        __ z_br(Z_R14);

        __ BIND(miss);
        i = 0;
        __ z_lg(Rsubklass,   (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rsuperklass, (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rlength,     (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        __ z_lg(Rarray_ptr,  (i++)*BytesPerWord + frame::z_abi_160_size, Z_SP);
        assert(i*BytesPerWord + frame::z_abi_160_size == frame_size, "check");
        __ pop_frame();
        // Return pc is still in Z_R14.
        __ load_const_optimized(Z_R0_scratch, 1); // One indicates a miss.
        __ z_ltgr(Z_R0_scratch, Z_R0_scratch);    // Set CC 2 (bcondNotEqual will be true).
        __ z_br(Z_R14);
      }
      break;
    case monitorenter_nofpu_id:
    case monitorenter_id:
      { // Z_R1_scratch : object
        // Z_R13        : lock address (see LIRGenerator::syncTempOpr())
        __ set_info("monitorenter", dont_gc_arguments);

        int save_fpu_registers = (id == monitorenter_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), Z_R1_scratch, Z_R13);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case monitorexit_nofpu_id:
    case monitorexit_id:
      { // Z_R1_scratch : lock address
        // Note: really a leaf routine but must set up last java sp
        //       => Use call_RT for now (speed can be improved by
        //       doing last java sp setup manually).
        __ set_info("monitorexit", dont_gc_arguments);

        int save_fpu_registers = (id == monitorexit_id);
        // Make a frame and preserve the caller's caller-save registers.
        OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), Z_R1_scratch);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm, save_fpu_registers);

        __ z_br(Z_R14);
      }
      break;

    case deoptimize_id:
      { // Args: Z_R1_scratch: trap request
        __ set_info("deoptimize", dont_gc_arguments);
        Register trap_request = Z_R1_scratch;
        OopMap* oop_map = save_live_registers(sasm);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), trap_request);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
        __ load_const_optimized(Z_R1_scratch, dest);
        __ z_br(Z_R1_scratch);
      }
      break;

    case access_field_patching_id:
      { __ set_info("access_field_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { __ set_info("load_klass_patching", dont_gc_arguments);
        // We should set up register map.
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { __ set_info("load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { __ set_info("load_appendix_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
#if 0
    case dtrace_object_alloc_id:
      { // rax,: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // We can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);

        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax, and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi,ecx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);

        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit
        __ movptr(rax, result_low_word);
        // testing of high bits
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // What the heck is the point of the next instruction???
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
        // return max_jlong
        __ mov64(rax, CONST64(0x7fffffffffffffff));
        __ jmp(do_return);

        __ bind(return_min_jlong);
        __ mov64(rax, UCONST64(0x8000000000000000));
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
        __ xorptr(rax, rax);

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;
#endif // TODO

    case predicate_failed_trap_id:
      {
        __ set_info("predicate_failed_trap", dont_gc_arguments);

        OopMap* map = save_live_registers(sasm);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);

        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");

        __ load_const_optimized(Z_R1_scratch, deopt_blob->unpack_with_reexecution());
        __ z_br(Z_R1_scratch);
      }
      break;

    default:
      {
        __ should_not_reach_here(FILE_AND_LINE, id);
      }
      break;
  }
  return oop_maps;
}

OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters: Z_EXC_OOP, Z_EXC_PC

  // Save registers if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  Register reg_fp = Z_R1_scratch;

  switch (id) {
    case forward_exception_id: {
      // We're handling an exception in the context of a compiled frame.
      // The registers have been saved in the standard places. Perform
      // an exception lookup in the caller and dispatch to the handler
      // if found. Otherwise unwind and dispatch to the caller's
      // exception handler.
      oop_map = generate_oop_map(sasm);

      // Load the pending exception oop into Z_EXC_OOP and clear the field.
      __ z_lg(Z_EXC_OOP, Address(Z_thread, Thread::pending_exception_offset()));
      __ clear_mem(Address(Z_thread, Thread::pending_exception_offset()), 8);

      // Different stubs forward their exceptions; they should all have similar frame layouts
      // (a) to find their return address (b) for a correct oop_map generated above.
      assert(RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers) ==
             RegisterSaver::live_reg_frame_size(RegisterSaver::all_registers_except_r2), "requirement");

      // Load issuing PC (the return address for this stub).
      const int frame_size_in_bytes = sasm->frame_size() * VMRegImpl::slots_per_word * VMRegImpl::stack_slot_size;
      __ z_lg(Z_EXC_PC, Address(Z_SP, frame_size_in_bytes + _z_abi16(return_pc)));
      DEBUG_ONLY(__ z_lay(reg_fp, Address(Z_SP, frame_size_in_bytes));)

      // Make sure that the vm_results are cleared (may be unnecessary).
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(oop));
      __ clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(Metadata*));
      break;
    }
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // At this point all registers MAY be live.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Z_EXC_PC);
      break;
    case handle_exception_from_callee_id: {
      // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
      DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
      __ save_return_pc(Z_EXC_PC);
      const int frame_size_in_bytes = __ push_frame_abi160(0);
      oop_map = new OopMap(frame_size_in_bytes / VMRegImpl::stack_slot_size, 0);
      sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
      break;
    }
    default: ShouldNotReachHere();
  }

  // Verify that only Z_EXC_OOP and Z_EXC_PC are valid at this time.
  __ invalidate_registers(Z_EXC_OOP, Z_EXC_PC, reg_fp);
  // Verify that Z_EXC_OOP contains a valid exception.
  __ verify_not_null_oop(Z_EXC_OOP);

  // Check that fields in JavaThread for exception oop and issuing pc
  // are empty before writing to them.
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_oop_offset()), Z_thread, "exception oop already set : " FILE_AND_LINE, 0);
  __ asm_assert_mem8_is_zero(in_bytes(JavaThread::exception_pc_offset()), Z_thread, "exception pc already set : " FILE_AND_LINE, 0);

  // Save exception oop and issuing pc into JavaThread.
  // (Exception handler will load it from here.)
  __ z_stg(Z_EXC_OOP, Address(Z_thread, JavaThread::exception_oop_offset()));
  __ z_stg(Z_EXC_PC, Address(Z_thread, JavaThread::exception_pc_offset()));

#ifdef ASSERT
  { NearLabel ok;
    __ z_cg(Z_EXC_PC, Address(reg_fp, _z_abi16(return_pc)));
    __ branch_optimized(Assembler::bcondEqual, ok);
    __ stop("use throwing pc as return address (has bci & oop map)");
    __ bind(ok);
  }
#endif

  // Compute the exception handler.
  // The exception oop and the throwing pc are read from the fields in JavaThread.
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // Z_RET(Z_R2): handler address. This will be the deopt blob if the nmethod was
  // deoptimized while we looked up the handler, regardless of whether a handler
  // existed in the nmethod.

  // Only Z_R2 is valid at this time; all other registers have been destroyed by the runtime call.
  __ invalidate_registers(Z_R2);

  switch(id) {
    case forward_exception_id:
    case handle_exception_nofpu_id:
    case handle_exception_id:
      // Restore the registers that were saved at the beginning.
      __ z_lgr(Z_R1_scratch, Z_R2);   // Restoring live registers kills Z_R2.
      restore_live_registers(sasm, id != handle_exception_nofpu_id);  // Pops the frame as well.
      __ z_br(Z_R1_scratch);
      break;
    case handle_exception_from_callee_id: {
      __ pop_frame();
      __ z_br(Z_R2); // Jump to exception handler.
    }
    break;
    default: ShouldNotReachHere();
  }

  return oop_maps;
}


#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}