/*
 * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "nativeInst_s390.hpp"
#include "oops/instanceOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"

// Declaration and definition of StubGenerator (no .hpp file).
// For a more detailed description of the stub routine structure
// see the comment in stubRoutines.hpp.

#ifdef PRODUCT
#define __ _masm->
#else
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#endif

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
#define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")

// -----------------------------------------------------------------------
// Stub Code definitions

class StubGenerator: public StubCodeGenerator {
 private:

  //----------------------------------------------------------------------
  // Call stubs are used to call Java from C.

  //
  // Arguments:
  //
  //   R2       - call wrapper address       : address
  //   R3       - result                     : intptr_t*
  //   R4       - result type                : BasicType
  //   R5       - method                     : method
  //   R6       - frame mgr entry point      : address
  //   [SP+160] - parameter block            : intptr_t*
  //   [SP+172] - parameter count in words   : int
  //   [SP+176] - thread                     : Thread*
  //
  address generate_call_stub(address& return_address) {
    // Set up a new C frame, copy Java arguments, call frame manager
    // or native_entry, and process result.
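    //
    // For orientation only, a hypothetical C-level signature matching the
    // register/stack layout documented above (a sketch, not a declaration
    // that exists anywhere in the VM):
    //
    //   intptr_t call_stub(address call_wrapper, intptr_t* result,
    //                      BasicType result_type, Method* method,
    //                      address entry_point, intptr_t* parameters,
    //                      int parameter_words, Thread* thread);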

    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    Register r_arg_call_wrapper_addr = Z_ARG1;
    Register r_arg_result_addr       = Z_ARG2;
    Register r_arg_result_type       = Z_ARG3;
    Register r_arg_method            = Z_ARG4;
    Register r_arg_entry             = Z_ARG5;

    // offsets to fp
    #define d_arg_thread 176
    #define d_arg_argument_addr 160
    #define d_arg_argument_count 168+4

    Register r_entryframe_fp         = Z_tmp_1;
    Register r_top_of_arguments_addr = Z_ARG4;
    Register r_new_arg_entry         = Z_R14;

    // macros for frame offsets
    #define call_wrapper_address_offset \
              _z_entry_frame_locals_neg(call_wrapper_address)
    #define result_address_offset \
              _z_entry_frame_locals_neg(result_address)
    #define result_type_offset \
              _z_entry_frame_locals_neg(result_type)
    #define arguments_tos_address_offset \
              _z_entry_frame_locals_neg(arguments_tos_address)

    {
      //
      // STACK on entry to call_stub:
      //
      //     F1      [C_FRAME]
      //             ...
      //

      Register r_argument_addr          = Z_tmp_3;
      Register r_argumentcopy_addr      = Z_tmp_4;
      Register r_argument_size_in_bytes = Z_ARG5;
      Register r_frame_size             = Z_R1;

      Label arguments_copied;

      // Save non-volatile registers to ABI of caller frame.
      BLOCK_COMMENT("save registers, push frame {");
      __ z_stmg(Z_R6, Z_R14, 16, Z_SP);
      __ z_std(Z_F8, 96, Z_SP);
      __ z_std(Z_F9, 104, Z_SP);
      __ z_std(Z_F10, 112, Z_SP);
      __ z_std(Z_F11, 120, Z_SP);
      __ z_std(Z_F12, 128, Z_SP);
      __ z_std(Z_F13, 136, Z_SP);
      __ z_std(Z_F14, 144, Z_SP);
      __ z_std(Z_F15, 152, Z_SP);

      //
      // Push ENTRY_FRAME including arguments:
      //
      //     F0      [TOP_IJAVA_FRAME_ABI]
      //             [outgoing Java arguments]
      //             [ENTRY_FRAME_LOCALS]
      //     F1      [C_FRAME]
      //             ...
      //

      // Calculate new frame size and push frame.
      #define abi_plus_locals_size \
                (frame::z_top_ijava_frame_abi_size + frame::z_entry_frame_locals_size)
      if (abi_plus_locals_size % BytesPerWord == 0) {
        // Preload constant part of frame size.
        __ load_const_optimized(r_frame_size, -abi_plus_locals_size/BytesPerWord);
        // Keep copy of our frame pointer (caller's SP).
        __ z_lgr(r_entryframe_fp, Z_SP);
        // Add space required by arguments to frame size.
        __ z_slgf(r_frame_size, d_arg_argument_count, Z_R0, Z_SP);
        // Move Z_ARG5 early, it will be used as a local.
        __ z_lgr(r_new_arg_entry, r_arg_entry);
        // Convert frame size from words to bytes.
        __ z_sllg(r_frame_size, r_frame_size, LogBytesPerWord);
        __ push_frame(r_frame_size, r_entryframe_fp,
                      false/*don't copy SP*/, true /*frame size sign inverted*/);
      } else {
        guarantee(false, "frame sizes should be multiples of word size (BytesPerWord)");
      }
      BLOCK_COMMENT("} save, push");

      // Load argument registers for call.
      BLOCK_COMMENT("prepare/copy arguments {");
      __ z_lgr(Z_method, r_arg_method);
      __ z_lg(Z_thread, d_arg_thread, r_entryframe_fp);

      // Calculate top_of_arguments_addr which will be tos (not prepushed) later.
      // Simply use SP + frame::top_ijava_frame_size.
      __ add2reg(r_top_of_arguments_addr,
                 frame::z_top_ijava_frame_abi_size - BytesPerWord, Z_SP);

      // Initialize call_stub locals (step 1).
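      // If the four locals occupy adjacent doublewords, a single STMG
      // (store-multiple: consecutive registers to consecutive memory) can
      // initialize them all at once; the offset check below guards that
      // assumption, with individual stores as the fallback.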
      if ((call_wrapper_address_offset + BytesPerWord == result_address_offset) &&
          (result_address_offset + BytesPerWord == result_type_offset) &&
          (result_type_offset + BytesPerWord == arguments_tos_address_offset)) {

        __ z_stmg(r_arg_call_wrapper_addr, r_top_of_arguments_addr,
                  call_wrapper_address_offset, r_entryframe_fp);
      } else {
        __ z_stg(r_arg_call_wrapper_addr,
                 call_wrapper_address_offset, r_entryframe_fp);
        __ z_stg(r_arg_result_addr,
                 result_address_offset, r_entryframe_fp);
        __ z_stg(r_arg_result_type,
                 result_type_offset, r_entryframe_fp);
        __ z_stg(r_top_of_arguments_addr,
                 arguments_tos_address_offset, r_entryframe_fp);
      }

      // Copy Java arguments.

      // Any arguments to copy?
      __ load_and_test_int2long(Z_R1, Address(r_entryframe_fp, d_arg_argument_count));
      __ z_bre(arguments_copied);

      // Prepare loop and copy arguments in reverse order.
      {
        // Calculate argument size in bytes.
        __ z_sllg(r_argument_size_in_bytes, Z_R1, LogBytesPerWord);

        // Get addr of first incoming Java argument.
        __ z_lg(r_argument_addr, d_arg_argument_addr, r_entryframe_fp);

        // Let r_argumentcopy_addr point to last outgoing Java argument.
        __ add2reg(r_argumentcopy_addr, BytesPerWord, r_top_of_arguments_addr); // = Z_SP+160 effectively.

        // Let r_argument_addr point to last incoming Java argument.
        __ add2reg_with_index(r_argument_addr, -BytesPerWord,
                              r_argument_size_in_bytes, r_argument_addr);

        // Now loop while Z_R1 > 0 and copy arguments.
        {
          Label next_argument;
          __ bind(next_argument);
          // Mem-mem move.
          __ z_mvc(0, BytesPerWord-1, r_argumentcopy_addr, 0, r_argument_addr);
          __ add2reg(r_argument_addr, -BytesPerWord);
          __ add2reg(r_argumentcopy_addr, BytesPerWord);
          __ z_brct(Z_R1, next_argument);
        }
      }  // End of argument copy loop.

      __ bind(arguments_copied);
    }
    BLOCK_COMMENT("} arguments");

    BLOCK_COMMENT("call {");
    {
      // Call frame manager or native entry.

      //
      // Register state on entry to frame manager / native entry:
      //
      //   Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed)
      //            Lesp = (SP) + copied_arguments_offset - 8
      //   Z_method                         - method
      //   Z_thread                         - JavaThread*
      //

      // Here, the usual SP is the initial_caller_sp.
      __ z_lgr(Z_R10, Z_SP);

      // Z_esp points to the slot below the last argument.
      __ z_lgr(Z_esp, r_top_of_arguments_addr);

      //
      // Stack on entry to frame manager / native entry:
      //
      //     F0      [TOP_IJAVA_FRAME_ABI]
      //             [outgoing Java arguments]
      //             [ENTRY_FRAME_LOCALS]
      //     F1      [C_FRAME]
      //             ...
      //

      // Do a light-weight C-call here, r_new_arg_entry holds the address
      // of the interpreter entry point (frame manager or native entry)
      // and save runtime-value of return_pc in return_address
      // (call by reference argument).
      return_address = __ call_stub(r_new_arg_entry);
    }
    BLOCK_COMMENT("} call");
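
    // Note for readers: the return_address captured above is handed back to
    // the caller of generate_call_stub() (it ends up as
    // StubRoutines::_call_stub_return_address); generate_catch_exception()
    // below branches back to exactly this point after recording a pending
    // exception.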

    {
      BLOCK_COMMENT("restore registers {");
      // Returned from frame manager or native entry.
      // Now pop frame, process result, and return to caller.

      //
      // Stack on exit from frame manager / native entry:
      //
      //     F0      [ABI]
      //             ...
      //             [ENTRY_FRAME_LOCALS]
      //     F1      [C_FRAME]
      //             ...
      //
      // Just pop the topmost frame ...
      //

      Label ret_is_object;
      Label ret_is_long;
      Label ret_is_float;
      Label ret_is_double;

      // Restore frame pointer.
      __ z_lg(r_entryframe_fp, _z_abi(callers_sp), Z_SP);
      // Pop frame. Done here to minimize stalls.
      __ z_lg(Z_SP, _z_abi(callers_sp), Z_SP);

      // Reload some volatile registers which we've spilled before the call
      // to frame manager / native entry.
      // Access all locals via frame pointer, because we know nothing about
      // the topmost frame's size.
      __ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp);
      __ z_lg(r_arg_result_type, result_type_offset, r_entryframe_fp);

      // Restore non-volatiles.
      __ z_lmg(Z_R6, Z_R14, 16, Z_SP);
      __ z_ld(Z_F8, 96, Z_SP);
      __ z_ld(Z_F9, 104, Z_SP);
      __ z_ld(Z_F10, 112, Z_SP);
      __ z_ld(Z_F11, 120, Z_SP);
      __ z_ld(Z_F12, 128, Z_SP);
      __ z_ld(Z_F13, 136, Z_SP);
      __ z_ld(Z_F14, 144, Z_SP);
      __ z_ld(Z_F15, 152, Z_SP);
      BLOCK_COMMENT("} restore");

      //
      // Stack on exit from call_stub:
      //
      //     0       [C_FRAME]
      //             ...
      //
      // No call_stub frames left.
      //

      // All non-volatiles have been restored at this point!!

      //------------------------------------------------------------------------
      // The following code makes some assumptions on the T_<type> enum values.
      // The enum is defined in globalDefinitions.hpp.
      // The validity of the assumptions is tested as far as possible.
      //   The assigned values should not be shuffled
      //   T_BOOLEAN == 4    - lowest used enum value
      //   T_NARROWOOP == 16 - largest used enum value
      //------------------------------------------------------------------------
      BLOCK_COMMENT("process result {");
      Label firstHandler;
      int   handlerLen = 8;
#ifdef ASSERT
      char  assertMsg[] = "check BasicType definition in globalDefinitions.hpp";
      __ z_chi(r_arg_result_type, T_BOOLEAN);
      __ asm_assert_low(assertMsg, 0x0234);
      __ z_chi(r_arg_result_type, T_NARROWOOP);
      __ asm_assert_high(assertMsg, 0x0235);
#endif
      __ add2reg(r_arg_result_type, -T_BOOLEAN);          // Remove offset.
      __ z_larl(Z_R1, firstHandler);                      // location of first handler
      __ z_sllg(r_arg_result_type, r_arg_result_type, 3); // Each handler is 8 bytes long.
      __ z_bc(MacroAssembler::bcondAlways, 0, r_arg_result_type, Z_R1);
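
      // Worked example (enum values follow from the guarantee chain below):
      // for an int result, r_arg_result_type arrives as T_INT == T_BOOLEAN+6,
      // so the computed branch lands at firstHandler + (6 << 3), i.e. the
      // seventh 8-byte handler slot.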

      __ align(handlerLen);
      __ bind(firstHandler);
      // T_BOOLEAN:
      guarantee(T_BOOLEAN == 4, "check BasicType definition in globalDefinitions.hpp");
      __ z_st(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_CHAR:
      guarantee(T_CHAR == T_BOOLEAN+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_st(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_FLOAT:
      guarantee(T_FLOAT == T_CHAR+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_ste(Z_FRET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_DOUBLE:
      guarantee(T_DOUBLE == T_FLOAT+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_std(Z_FRET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_BYTE:
      guarantee(T_BYTE == T_DOUBLE+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_st(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_SHORT:
      guarantee(T_SHORT == T_BYTE+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_st(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_INT:
      guarantee(T_INT == T_SHORT+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_st(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_LONG:
      guarantee(T_LONG == T_INT+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_stg(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_OBJECT:
      guarantee(T_OBJECT == T_LONG+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_stg(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_ARRAY:
      guarantee(T_ARRAY == T_OBJECT+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_stg(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_VOID:
      guarantee(T_VOID == T_ARRAY+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_stg(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_ADDRESS:
      guarantee(T_ADDRESS == T_VOID+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_stg(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      // T_NARROWOOP:
      guarantee(T_NARROWOOP == T_ADDRESS+1, "check BasicType definition in globalDefinitions.hpp");
      __ z_st(Z_RET, 0, r_arg_result_addr);
      __ z_br(Z_R14); // Return to caller.
      __ align(handlerLen);
      BLOCK_COMMENT("} process result");
    }
    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code. The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();

    //
    // Registers alive
    //
    //   Z_thread
    //   Z_ARG1 - address of pending exception
    //   Z_ARG2 - return address in call stub
    //

    const Register exception_file = Z_R0;
    const Register exception_line = Z_R1;

    __ load_const_optimized(exception_file, (void*)__FILE__);
    __ load_const_optimized(exception_line, (void*)__LINE__);

    __ z_stg(Z_ARG1, thread_(pending_exception));
    // Store into `char *'.
    __ z_stg(exception_file, thread_(exception_file));
    // Store into `int'.
    __ z_st(exception_line, thread_(exception_line));

    // Complete return to VM.
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    // Continue in call stub.
    __ z_br(Z_ARG2);

    return start;
  }
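
  // (Flow sketch, describing code outside this excerpt: once the stub above
  // has stored the oop into JavaThread::_pending_exception, control
  // re-enters the call stub and returns to the C caller, which can test
  // for the recorded exception, e.g. via Thread::has_pending_exception().)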

  // Continuation point for runtime calls returning with a pending
  // exception. The pending exception check happened in the runtime
  // or native call stub. The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Read:
  //   Z_R14: pc the runtime library callee wants to return to.
  //   Since the exception occurred in the callee, the return pc
  //   from the point of view of Java is the exception pc.
  //
  // Invalidate:
  //   Volatile registers (except below).
  //
  // Update:
  //   Z_ARG1: exception
  //   (Z_R14 is unchanged and is live out).
  //
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

    #define pending_exception_offset in_bytes(Thread::pending_exception_offset())
#ifdef ASSERT
    // Get pending exception oop.
    __ z_lg(Z_ARG1, pending_exception_offset, Z_thread);

    // Make sure that this code is only executed if there is a pending exception.
    {
      Label L;
      __ z_ltgr(Z_ARG1, Z_ARG1);
      __ z_brne(L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }

    __ verify_oop(Z_ARG1, "StubRoutines::forward exception: not an oop");
#endif

    __ z_lgr(Z_ARG2, Z_R14); // Copy exception pc into Z_ARG2.
    __ save_return_pc();
    __ push_frame_abi160(0);
    // Find exception handler.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
                    Z_thread,
                    Z_ARG2);
    // Copy handler's address.
    __ z_lgr(Z_R1, Z_RET);
    __ pop_frame();
    __ restore_return_pc();

    // Set up the arguments for the exception handler:
    // - Z_ARG1: exception oop
    // - Z_ARG2: exception pc

    // Load pending exception oop.
    __ z_lg(Z_ARG1, pending_exception_offset, Z_thread);

    // The exception pc is the return address in the caller,
    // i.e. we must load it into Z_ARG2.
    __ z_lgr(Z_ARG2, Z_R14);

#ifdef ASSERT
    // Make sure exception is set.
    { Label L;
      __ z_ltgr(Z_ARG1, Z_ARG1);
      __ z_brne(L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif
    // Clear the pending exception.
    __ clear_mem(Address(Z_thread, pending_exception_offset), sizeof(void *));
    // Jump to exception handler.
    __ z_br(Z_R1 /*handler address*/);

    return start;

    #undef pending_exception_offset
  }

  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Only callee-saved registers are preserved (through the
  // normal RegisterMap handling). If the compiler
  // needs all registers to be preserved between the fault point and
  // the exception handler then it must assume responsibility for that
  // in AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.

  // Note that we generate only this stub into a RuntimeStub, because
  // it needs to be properly traversed and ignored during GC, so we
  // change the meaning of the "__" macro within this method.

  // Note: the routine set_pc_not_at_call_for_caller in
  // SharedRuntime.cpp requires that this code be generated into a
  // RuntimeStub.
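
  // (Usage note, not part of the generated code: the generator below is
  // typically invoked once per implicit exception kind during stub
  // initialization, with runtime_entry pointing at a SharedRuntime throw
  // helper such as SharedRuntime::throw_StackOverflowError; the actual
  // call sites are outside this excerpt.)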

#undef __
#define __ masm->

  address generate_throw_exception(const char* name, address runtime_entry,
                                   bool restore_saved_exception_pc,
                                   Register arg1 = noreg, Register arg2 = noreg) {
    int insts_size = 256;
    int locs_size  = 0;
    CodeBuffer code(name, insts_size, locs_size);
    MacroAssembler* masm = new MacroAssembler(&code);
    int framesize_in_bytes;
    address start = __ pc();

    __ save_return_pc();
    framesize_in_bytes = __ push_frame_abi160(0);

    address frame_complete_pc = __ pc();
    if (restore_saved_exception_pc) {
      __ unimplemented("StubGenerator::throw_exception", 74);
    }

    // Note that we always have a runtime stub frame on the top of stack at this point.
    __ get_PC(Z_R1);
    __ set_last_Java_frame(/*sp*/Z_SP, /*pc*/Z_R1);

    // Do the call.
    BLOCK_COMMENT("call runtime_entry");
    __ call_VM_leaf(runtime_entry, Z_thread, arg1, arg2);

    __ reset_last_Java_frame();

#ifdef ASSERT
    // Make sure that this code is only executed if there is a pending exception.
    { Label L;
      __ z_lg(Z_R0,
              in_bytes(Thread::pending_exception_offset()),
              Z_thread);
      __ z_ltgr(Z_R0, Z_R0);
      __ z_brne(L);
      __ stop("StubRoutines::throw_exception: no pending exception");
      __ bind(L);
    }
#endif

    __ pop_frame();
    __ restore_return_pc();

    __ load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
    __ z_br(Z_R1);

    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name, &code,
                                    frame_complete_pc - start,
                                    framesize_in_bytes/wordSize,
                                    NULL /*oop_maps*/, false);

    return stub->entry_point();
  }

#undef __
#ifdef PRODUCT
#define __ _masm->
#else
#define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#endif

  //----------------------------------------------------------------------
  // The following routine generates a subroutine to throw an asynchronous
  // UnknownError when an unsafe access gets a fault that could not be
  // reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
  //
  // Arguments:
  //   trapping PC: ??
  //
  // Results:
  //   Posts an asynchronous exception, skips the trapping instruction.
  //
  address generate_handler_for_unsafe_access() {
    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
    {
      address start = __ pc();
      __ unimplemented("StubRoutines::handler_for_unsafe_access", 86);
      return start;
    }
  }

  // Support for uint StubRoutine::zarch::partial_subtype_check(Klass
  // sub, Klass super);
  //
  // Arguments:
  //   ret  : Z_RET,  returned
  //   sub  : Z_ARG2, argument, not changed
  //   super: Z_ARG3, argument, not changed
  //
  //   raddr: Z_R14, blown by call
  //
  address generate_partial_subtype_check() {
    StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
    Label miss;

    address start = __ pc();

    const Register Rsubklass   = Z_ARG2; // subklass
    const Register Rsuperklass = Z_ARG3; // superklass

    // No args, but tmp registers that are killed.
    const Register Rlength    = Z_ARG4; // cache array length
    const Register Rarray_ptr = Z_ARG5; // Current value from cache array.

    if (UseCompressedOops) {
      assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
    }

    // Always take the slow path (see SPARC).
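    // (The slow path performs a linear scan of the subklass's secondary
    // supers array, looking for Rsuperklass; Rlength and Rarray_ptr are
    // merely the scratch registers it consumes.)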
    __ check_klass_subtype_slow_path(Rsubklass, Rsuperklass,
                                     Rarray_ptr, Rlength, NULL, &miss);

    // Match falls through here.
    __ clear_reg(Z_RET);               // Zero indicates a match. Set EQ flag in CC.
    __ z_br(Z_R14);

    __ BIND(miss);
    __ load_const_optimized(Z_RET, 1); // One indicates a miss.
    __ z_ltgr(Z_RET, Z_RET);           // Set NE flag in CC.
    __ z_br(Z_R14);

    return start;
  }

  // Return address of code to be called from code generated by
  // MacroAssembler::verify_oop.
  //
  // Don't generate, rather use C++ code.
  address generate_verify_oop_subroutine() {
    // Don't generate a StubCodeMark, because no code is generated!
    // Generating the mark triggers notifying the oprofile jvmti agent
    // about the dynamic code generation, but the stub without
    // code (code_size == 0) confuses opjitconv.
    // StubCodeMark mark(this, "StubRoutines", "verify_oop_stub");

    address start = 0;
    return start;
  }

  // Generate pre-write barrier for array.
  //
  // Input:
  //   addr  - register containing starting address
  //   count - register containing element count
  //
  // The input registers are overwritten.
  void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {

    BarrierSet* const bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized.
        if (!dest_uninitialized) {
          // Is marking active?
          Label filtered;
          Register Rtmp1 = Z_R0;
          const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
                                             SATBMarkQueue::byte_offset_of_active());
          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
            __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
          } else {
            guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
            __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
          }
          __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.

          // __ push_frame_abi160(0);
          (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), addr, count);
          (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
          // __ pop_frame();

          __ bind(filtered);
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Generate post-write barrier for array.
  //
  // Input:
  //   addr  - register containing starting address
  //   count - register containing element count
  //
  // The input registers are overwritten.
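  // branchToEnd selects the epilogue: if true, the generated code falls
  // through (or branches) to a local "done" label; if false, each exit
  // returns straight to the stub caller via Z_R14.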
  void gen_write_ref_array_post_barrier(Register addr, Register count, bool branchToEnd) {
    BarrierSet* const bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          if (branchToEnd) {
            // __ push_frame_abi160(0);
            (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
            __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
            (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
            // __ pop_frame();
          } else {
            // Tail call: call c and return to stub caller.
            address entry_point = CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
            if (Z_ARG1 != addr) __ z_lgr(Z_ARG1, addr);
            if (Z_ARG2 != count) __ z_lgr(Z_ARG2, count);
            __ load_const(Z_R1, entry_point);
            __ z_br(Z_R1); // Branch without linking, callee will return to stub caller.
          }
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
        // These cases were formerly known as
        //   void array_store_check(Register addr, Register count, bool branchToEnd).
        {
          NearLabel doXC, done;
          CardTableModRefBS* ct = (CardTableModRefBS*)bs;
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(Z_R0, Z_R1, addr, count);

          // Nothing to do if count <= 0.
          if (branchToEnd) {
            __ compare64_and_branch(count, (intptr_t) 0, Assembler::bcondNotHigh, done);
          } else {
            __ z_ltgr(count, count);
            __ z_bcr(Assembler::bcondNotPositive, Z_R14);
          }

          // Note: We can't combine the shifts. We could lose a carry
          // from calculating the array end address.
          // count = (count-1)*BytesPerHeapOop + addr
          // Count holds addr of last oop in array then.
          __ z_sllg(count, count, LogBytesPerHeapOop);
          __ add2reg_with_index(count, -BytesPerHeapOop, count, addr);

          // Get base address of card table.
          __ load_const_optimized(Z_R1, (address)ct->byte_map_base);

          // count = (count>>shift) - (addr>>shift)
          __ z_srlg(addr,  addr,  CardTableModRefBS::card_shift);
          __ z_srlg(count, count, CardTableModRefBS::card_shift);
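
          // (For orientation: with the usual 512-byte card size, addr and
          // count now hold the indices of the first and last card touched
          // by the store range; the code below marks each of those card
          // bytes dirty by storing zero.)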

          // Prefetch first elements of card table for update.
          if (VM_Version::has_Prefetch()) {
            __ z_pfd(0x02, 0, addr, Z_R1);
          }

          // Special case: clear just one byte.
          __ clear_reg(Z_R0, true, false); // Used for doOneByte.
          __ z_sgr(count, addr);           // Count = n-1 now, CC used for brc below.
          __ z_stc(Z_R0, 0, addr, Z_R1);   // Must preserve CC from z_sgr.
          if (branchToEnd) {
            __ z_brz(done);
          } else {
            __ z_bcr(Assembler::bcondZero, Z_R14);
          }

          __ z_cghi(count, 255);
          __ z_brnh(doXC);

          // MVCLE: clear a long area.
          // Start addr of card table range = base + addr.
          // # bytes in card table range    = (count + 1)
          __ add2reg_with_index(Z_R0, 0, Z_R1, addr);
          __ add2reg(Z_R1, 1, count);

          // dirty hack:
          // There are just two callers. Both pass
          //   count in Z_ARG3 = Z_R4
          //   addr  in Z_ARG2 = Z_R3
          // ==> use Z_ARG2 as src len reg = 0
          //     Z_ARG1 as src addr (ignored)
          assert(count == Z_ARG3, "count: unexpected register number");
          assert(addr  == Z_ARG2, "addr:  unexpected register number");
          __ clear_reg(Z_ARG2, true, false);

          __ MacroAssembler::move_long_ext(Z_R0, Z_ARG1, 0);

          if (branchToEnd) {
            __ z_bru(done);
          } else {
            __ z_bcr(Assembler::bcondAlways, Z_R14);
          }

          // XC: clear a short area.
          Label XC_template; // Instr template, never exec directly!
          __ bind(XC_template);
          __ z_xc(0, 0, addr, 0, addr);

          __ bind(doXC);
          // start addr of card table range = base + addr
          // end   addr of card table range = base + addr + count
          __ add2reg_with_index(addr, 0, Z_R1, addr);

          if (VM_Version::has_ExecuteExtensions()) {
            __ z_exrl(count, XC_template); // Execute XC with var. len.
          } else {
            __ z_larl(Z_R1, XC_template);
            __ z_ex(count, 0, Z_R0, Z_R1); // Execute XC with var. len.
          }
          if (!branchToEnd) {
            __ z_br(Z_R14);
          }

          __ bind(done);
        }
        break;
      case BarrierSet::ModRef:
        if (!branchToEnd) { __ z_br(Z_R14); }
        break;
      default:
        ShouldNotReachHere();
    }
  }


  // This is to test that the count register contains a positive int value.
  // Required because C2 does not respect int to long conversion for stub calls.
  void assert_positive_int(Register count) {
#ifdef ASSERT
    __ z_srag(Z_R0, count, 31); // Just leave the sign (must be zero) in Z_R0.
    __ asm_assert_eq("missing zero extend", 0xAFFE);
#endif
  }

  // Generate overlap test for array copy stubs.
  // If no actual overlap is detected, control is transferred to the
  // "normal" copy stub (entry address passed in disjoint_copy_target).
  // Otherwise, execution continues with the code generated by the
  // caller of array_overlap_test.
  //
  // Input:
  //   Z_ARG1 - from
  //   Z_ARG2 - to
  //   Z_ARG3 - element count
  void array_overlap_test(address disjoint_copy_target, int log2_elem_size) {
    __ MacroAssembler::compare_and_branch_optimized(Z_ARG2, Z_ARG1, Assembler::bcondNotHigh,
                                                    disjoint_copy_target, /*len64=*/true, /*has_sign=*/false);

    Register index = Z_ARG3;
    if (log2_elem_size > 0) {
      __ z_sllg(Z_R1, Z_ARG3, log2_elem_size); // byte count
      index = Z_R1;
    }
    __ add2reg_with_index(Z_R1, 0, index, Z_ARG1); // First byte after "from" range.

    __ MacroAssembler::compare_and_branch_optimized(Z_R1, Z_ARG2, Assembler::bcondNotHigh,
                                                    disjoint_copy_target, /*len64=*/true, /*has_sign=*/false);

    // Destructive overlap: let caller generate code for that.
  }

  // Generate stub for disjoint array copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  Z_ARG1
  //   to:    Z_ARG2
  //   count: Z_ARG3 treated as signed
  void generate_disjoint_copy(bool aligned, int element_size,
                              bool branchToEnd,
                              bool restoreArgs) {
    // This is the zarch specific stub generator for general array copy tasks.
    // It has the following prereqs and features:
    //
    // - No destructive overlap allowed (else unpredictable results).
    // - Destructive overlap does not exist if the leftmost byte of the target
    //   does not coincide with any of the source bytes (except the leftmost).
    //
    // Register usage upon entry:
    //   Z_ARG1 == Z_R2 : address of source array
    //   Z_ARG2 == Z_R3 : address of target array
    //   Z_ARG3 == Z_R4 : length of operands (# of elements on entry)
    //
    // Register usage within the generator:
    // - Z_R0 and Z_R1 are KILLed by the stub routine (target addr/len).
    //   Used as pair register operand in complex moves, scratch registers anyway.
    // - Z_R5 is KILLed by the stub routine (source register pair addr/len) (even/odd reg).
    //   Same as R0/R1, but no scratch register.
    // - Z_ARG1, Z_ARG2, Z_ARG3 are USEd but preserved by the stub routine,
    //   but they might get temporarily overwritten.

    Register save_reg = Z_ARG4; // (= Z_R5), holds original target operand address for restore.

    {
      Register llen_reg  = Z_R1;   // Holds left operand len (odd reg).
      Register laddr_reg = Z_R0;   // Holds left operand addr (even reg), overlaps with data_reg.
      Register rlen_reg  = Z_R5;   // Holds right operand len (odd reg), overlaps with save_reg.
      Register raddr_reg = Z_R4;   // Holds right operand addr (even reg), overlaps with len_reg.

      Register data_reg  = Z_R0;   // Holds copied data chunk in alignment process and copy loop.
      Register len_reg   = Z_ARG3; // Holds operand len (#elements at entry, #bytes shortly after).
      Register dst_reg   = Z_ARG2; // Holds left (target)  operand addr.
      Register src_reg   = Z_ARG1; // Holds right (source) operand addr.

      Label     doMVCLOOP, doMVCLOOPcount, doMVCLOOPiterate;
      Label     doMVCUnrolled;
      NearLabel doMVC, doMVCgeneral, done;
      Label     MVC_template;
      address   pcMVCblock_b, pcMVCblock_e;

      bool      usedMVCLE       = true;
      bool      usedMVCLOOP     = true;
      bool      usedMVCUnrolled = false;
      bool      usedMVC         = false;
      bool      usedMVCgeneral  = false;

      int       stride;
      Register  stride_reg;
      Register  ix_reg;

      assert((element_size <= 256) && (256 % element_size == 0), "element size must be <= 256, power of 2");
      unsigned int log2_size = exact_log2(element_size);

      switch (element_size) {
        case 1:  BLOCK_COMMENT("ARRAYCOPY DISJOINT byte {");  break;
        case 2:  BLOCK_COMMENT("ARRAYCOPY DISJOINT short {"); break;
        case 4:  BLOCK_COMMENT("ARRAYCOPY DISJOINT int {");   break;
        case 8:  BLOCK_COMMENT("ARRAYCOPY DISJOINT long {");  break;
        default: BLOCK_COMMENT("ARRAYCOPY DISJOINT {");       break;
      }

      assert_positive_int(len_reg);

      BLOCK_COMMENT("preparation {");

      // No copying if len <= 0.
      if (branchToEnd) {
        __ compare64_and_branch(len_reg, (intptr_t) 0, Assembler::bcondNotHigh, done);
      } else {
        if (VM_Version::has_CompareBranch()) {
          __ z_cgib(len_reg, 0, Assembler::bcondNotHigh, 0, Z_R14);
        } else {
          __ z_ltgr(len_reg, len_reg);
          __ z_bcr(Assembler::bcondNotPositive, Z_R14);
        }
      }

      // Prefetch just one cache line. Speculative opt for short arrays.
      // Do not use Z_R1 in prefetch. Is undefined here.
      if (VM_Version::has_Prefetch()) {
        __ z_pfd(0x01, 0, Z_R0, src_reg); // Fetch access.
        __ z_pfd(0x02, 0, Z_R0, dst_reg); // Store access.
      }

      BLOCK_COMMENT("} preparation");

      // Save args only if really needed.
      // Keep len test local to branch. Is generated only once.

      BLOCK_COMMENT("mode selection {");
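
      // Selection summary (a sketch; the thresholds are those used in the
      // compares below):
      //   total bytes <= 256  -> one executed MVC (unrolled MVC for 8-byte
      //                          elements, which are always DW aligned)
      //   total bytes <= 4096 -> MVC loop in 256-byte strides
      //   larger              -> MVCLE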

      // Special handling for arrays with only a few elements.
      // Nothing fancy: just an executed MVC.
      if (log2_size > 0) {
        __ z_sllg(Z_R1, len_reg, log2_size); // Remember #bytes in Z_R1.
      }
      if (element_size != 8) {
        __ z_cghi(len_reg, 256/element_size);
        __ z_brnh(doMVC);
        usedMVC = true;
      }
      if (element_size == 8) { // Long and oop arrays are always aligned.
        __ z_cghi(len_reg, 256/element_size);
        __ z_brnh(doMVCUnrolled);
        usedMVCUnrolled = true;
      }

      // Prefetch another cache line. We, for sure, have more than one line to copy.
      if (VM_Version::has_Prefetch()) {
        __ z_pfd(0x01, 256, Z_R0, src_reg); // Fetch access.
        __ z_pfd(0x02, 256, Z_R0, dst_reg); // Store access.
      }

      if (restoreArgs) {
        // Remember entry value of ARG2 to restore all arguments later from that knowledge.
        __ z_lgr(save_reg, dst_reg);
      }

      __ z_cghi(len_reg, 4096/element_size);
      if (log2_size == 0) {
        __ z_lgr(Z_R1, len_reg); // Init Z_R1 with #bytes
      }
      __ z_brnh(doMVCLOOP);

      // Fall through to MVCLE case.

      BLOCK_COMMENT("} mode selection");

      // MVCLE: for long arrays
      //   DW aligned: Best performance for sizes > 4kBytes.
      //   unaligned:  Least complex for sizes > 256 bytes.
      if (usedMVCLE) {
        BLOCK_COMMENT("mode MVCLE {");

        // Setup registers for mvcle.
        //__ z_lgr(llen_reg, len_reg);  // r1 <- r4  #bytes already in Z_R1, aka llen_reg.
        __ z_lgr(laddr_reg, dst_reg);   // r0 <- r3
        __ z_lgr(raddr_reg, src_reg);   // r4 <- r2
        __ z_lgr(rlen_reg, llen_reg);   // r5 <- r1

        __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0xb0);    // special: bypass cache
        // __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0xb8); // special: Hold data in cache.
        // __ MacroAssembler::move_long_ext(laddr_reg, raddr_reg, 0);

        if (restoreArgs) {
          // MVCLE updates the source (Z_R4,Z_R5) and target (Z_R0,Z_R1) register pairs.
          // Dst_reg (Z_ARG2) and src_reg (Z_ARG1) are left untouched. No restore required.
          // Len_reg (Z_ARG3) is destroyed and must be restored.
          __ z_slgr(laddr_reg, dst_reg); // copied #bytes
          if (log2_size > 0) {
            __ z_srag(Z_ARG3, laddr_reg, log2_size); // Convert back to #elements.
          } else {
            __ z_lgr(Z_ARG3, laddr_reg);
          }
        }
        if (branchToEnd) {
          __ z_bru(done);
        } else {
          __ z_br(Z_R14);
        }
        BLOCK_COMMENT("} mode MVCLE");
      }
      // No fallthru possible here.

      // MVCUnrolled: for short, aligned arrays.

      if (usedMVCUnrolled) {
        BLOCK_COMMENT("mode MVC unrolled {");
        stride = 8;

        // Generate unrolled MVC instructions.
        for (int ii = 32; ii > 1; ii--) {
          __ z_mvc(0, ii * stride-1, dst_reg, 0, src_reg); // ii*8 byte copy
          if (branchToEnd) {
            __ z_bru(done);
          } else {
            __ z_br(Z_R14);
          }
        }

        pcMVCblock_b = __ pc();
        __ z_mvc(0, 1 * stride-1, dst_reg, 0, src_reg); // 8 byte copy
        if (branchToEnd) {
          __ z_bru(done);
        } else {
          __ z_br(Z_R14);
        }

        pcMVCblock_e = __ pc();
        Label MVC_ListEnd;
        __ bind(MVC_ListEnd);

        // This is an absolute fast path:
        // - Array len in bytes must be not greater than 256.
        // - Array len in bytes must be an integer mult of DW
        //   to save expensive handling of trailing bytes.
        // - Argument restore is not done,
        //   i.e. previous code must not alter arguments (this code doesn't either).

        __ bind(doMVCUnrolled);

        // Avoid mul, prefer shift where possible.
        // Combine shift right (for #DW) with shift left (for block size).
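        // (Dispatch example: an n-DW copy ends up branching to
        // MVC_ListEnd - n*MVCblocksize, i.e. the unrolled block that moves
        // exactly n doublewords; the 1-DW block sits closest to MVC_ListEnd.)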
        // Set CC for zero test below (asm_assert).
        // Note: #bytes comes in Z_R1, #DW in len_reg.
        unsigned int MVCblocksize    = pcMVCblock_e - pcMVCblock_b;
        unsigned int logMVCblocksize = 0xffffffffU; // Pacify compiler ("used uninitialized" warning).

        if (log2_size > 0) { // Len was scaled into Z_R1.
          switch (MVCblocksize) {

            case  8: logMVCblocksize = 3;
                     __ z_ltgr(Z_R0, Z_R1); // #bytes is index
                     break;                 // reasonable size, use shift

            case 16: logMVCblocksize = 4;
                     __ z_slag(Z_R0, Z_R1, logMVCblocksize-log2_size);
                     break;                 // reasonable size, use shift

            default: logMVCblocksize = 0;
                     __ z_ltgr(Z_R0, len_reg); // #DW for mul
                     break;                    // all other sizes: use mul
          }
        } else {
          guarantee(log2_size, "doMVCUnrolled: only for DW entities");
        }

        // This test (and branch) is redundant. Previous code makes sure that
        //  - element count > 0
        //  - element size == 8.
        // Thus, len reg should never be zero here. We insert an asm_assert() here,
        // just to double-check and to be on the safe side.
        __ asm_assert(false, "zero len cannot occur", 99);

        __ z_larl(Z_R1, MVC_ListEnd); // Get addr of last instr block.
        // Avoid mul, prefer shift where possible.
        if (logMVCblocksize == 0) {
          __ z_mghi(Z_R0, MVCblocksize);
        }
        __ z_slgr(Z_R1, Z_R0);
        __ z_br(Z_R1);
        BLOCK_COMMENT("} mode MVC unrolled");
      }
      // No fallthru possible here.

      // MVC execute template
      // Must always generate. Usage may be switched on below.
      // There is no suitable place after here to put the template.
      __ bind(MVC_template);
      __ z_mvc(0,0,dst_reg,0,src_reg); // Instr template, never exec directly!


      // MVC Loop: for medium-sized arrays

      // Only for DW aligned arrays (src and dst).
      // #bytes to copy must be at least 256!!!
      // Non-aligned cases handled separately.
      stride     = 256;
      stride_reg = Z_R1;   // Holds #bytes when control arrives here.
      ix_reg     = Z_ARG3; // Alias for len_reg.


      if (usedMVCLOOP) {
        BLOCK_COMMENT("mode MVC loop {");
        __ bind(doMVCLOOP);

        __ z_lcgr(ix_reg, Z_R1);      // Ix runs from -(n-2)*stride to 1*stride (inclusive).
        __ z_llill(stride_reg, stride);
        __ add2reg(ix_reg, 2*stride); // Thus: increment ix by 2*stride.

        __ bind(doMVCLOOPiterate);
        __ z_mvc(0, stride-1, dst_reg, 0, src_reg);
        __ add2reg(dst_reg, stride);
        __ add2reg(src_reg, stride);
        __ bind(doMVCLOOPcount);
        __ z_brxlg(ix_reg, stride_reg, doMVCLOOPiterate);

        // Don't use add2reg() here, since we must set the condition code!
        __ z_aghi(ix_reg, -2*stride); // Compensate incr from above: zero diff means "all copied".

        if (restoreArgs) {
          __ z_lcgr(Z_R1, ix_reg); // Prepare ix_reg for copy loop, #bytes expected in Z_R1.
          __ z_brnz(doMVCgeneral); // We're not done yet, ix_reg is not zero.

          // ARG1, ARG2, and ARG3 were altered by the code above, so restore them building on save_reg.
          __ z_slgr(dst_reg, save_reg); // copied #bytes
          __ z_slgr(src_reg, dst_reg);  // = ARG1 (now restored)
          if (log2_size) {
            __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3.
          } else {
            __ z_lgr(Z_ARG3, dst_reg);
          }
          __ z_lgr(Z_ARG2, save_reg); // ARG2 now restored.
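
          // (Restore math, for reference: dst_reg - save_reg is the number
          // of bytes copied so far; subtracting that from the advanced
          // src_reg recovers the original ARG1, and shifting it right by
          // log2_size recovers the element count for ARG3.)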

          if (branchToEnd) {
            __ z_bru(done);
          } else {
            __ z_br(Z_R14);
          }

        } else {
          if (branchToEnd) {
            __ z_brz(done);                        // CC set by aghi instr.
          } else {
            __ z_bcr(Assembler::bcondZero, Z_R14); // We're all done if zero.
          }

          __ z_lcgr(Z_R1, ix_reg); // Prepare ix_reg for copy loop, #bytes expected in Z_R1.
          // __ z_bru(doMVCgeneral); // fallthru
        }
        usedMVCgeneral = true;
        BLOCK_COMMENT("} mode MVC loop");
      }
      // Fallthru to doMVCgeneral

      // MVCgeneral: for short, unaligned arrays, after other copy operations

      // Somewhat expensive due to use of EX instruction, but simple.
      if (usedMVCgeneral) {
        BLOCK_COMMENT("mode MVC general {");
        __ bind(doMVCgeneral);

        __ add2reg(len_reg, -1, Z_R1); // Get #bytes-1 for EXECUTE.
        if (VM_Version::has_ExecuteExtensions()) {
          __ z_exrl(len_reg, MVC_template); // Execute MVC with variable length.
        } else {
          __ z_larl(Z_R1, MVC_template);    // Get addr of instr template.
          __ z_ex(len_reg, 0, Z_R0, Z_R1);  // Execute MVC with variable length.
        }                                   // penalty: 9 ticks

        if (restoreArgs) {
          // ARG1, ARG2, and ARG3 were altered by code executed before, so restore them building on save_reg.
          __ z_slgr(dst_reg, save_reg); // Copied #bytes without the "doMVCgeneral" chunk
          __ z_slgr(src_reg, dst_reg);  // = ARG1 (now restored), was not advanced for "doMVCgeneral" chunk
          __ add2reg_with_index(dst_reg, 1, len_reg, dst_reg); // Len of executed MVC was not accounted for, yet.
          if (log2_size) {
            __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3
          } else {
            __ z_lgr(Z_ARG3, dst_reg);
          }
          __ z_lgr(Z_ARG2, save_reg); // ARG2 now restored.
        }

        if (usedMVC) {
          if (branchToEnd) {
            __ z_bru(done);
          } else {
            __ z_br(Z_R14);
          }
        } else {
          if (!branchToEnd) __ z_br(Z_R14);
        }
        BLOCK_COMMENT("} mode MVC general");
      }
      // Fallthru possible if following block not generated.

      // MVC: for short, unaligned arrays

      // Somewhat expensive due to use of EX instruction, but simple. penalty: 9 ticks.
      // Differs from doMVCgeneral in reconstruction of ARG2, ARG3, and ARG4.
      if (usedMVC) {
        BLOCK_COMMENT("mode MVC {");
        __ bind(doMVC);

        // get #bytes-1 for EXECUTE
        if (log2_size) {
          __ add2reg(Z_R1, -1);          // Length was scaled into Z_R1.
        } else {
          __ add2reg(Z_R1, -1, len_reg); // Length was not scaled.
        }

        if (VM_Version::has_ExecuteExtensions()) {
          __ z_exrl(Z_R1, MVC_template); // Execute MVC with variable length.
        } else {
          __ z_lgr(Z_R0, Z_R5);          // Save ARG4, may be unnecessary.
          __ z_larl(Z_R5, MVC_template); // Get addr of instr template.
          __ z_ex(Z_R1, 0, Z_R0, Z_R5);  // Execute MVC with variable length.
          __ z_lgr(Z_R5, Z_R0);          // Restore ARG4, may be unnecessary.
        }

        if (!branchToEnd) {
          __ z_br(Z_R14);
        }
        BLOCK_COMMENT("} mode MVC");
      }

      __ bind(done);

      switch (element_size) {
        case 1:  BLOCK_COMMENT("} ARRAYCOPY DISJOINT byte ");  break;
        case 2:  BLOCK_COMMENT("} ARRAYCOPY DISJOINT short"); break;
        case 4:  BLOCK_COMMENT("} ARRAYCOPY DISJOINT int ");   break;
        case 8:  BLOCK_COMMENT("} ARRAYCOPY DISJOINT long ");  break;
        default: BLOCK_COMMENT("} ARRAYCOPY DISJOINT ");       break;
      }
    }
  }

  // Generate stub for conjoint array copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   from:  Z_ARG1
  //   to:    Z_ARG2
  //   count: Z_ARG3 treated as signed
  void generate_conjoint_copy(bool aligned, int element_size, bool branchToEnd) {

    // This is the zarch specific stub generator for general array copy tasks.
    // It has the following prereqs and features:
    //
    // - Destructive overlap exists and is handled by reverse copy.
    // - Destructive overlap exists if the leftmost byte of the target
    //   does coincide with any of the source bytes (except the leftmost).
    // - Z_R0 and Z_R1 are KILLed by the stub routine (data and stride)
    // - Z_ARG1 and Z_ARG2 are USEd but preserved by the stub routine.
    // - Z_ARG3 is USED but preserved by the stub routine.
    // - Z_ARG4 is used as index register and is thus KILLed.
    //
    {
      Register stride_reg = Z_R1;   // Stride & compare value in loop (negative element_size).
      Register data_reg   = Z_R0;   // Holds value of currently processed element.
      Register ix_reg     = Z_ARG4; // Holds byte index of currently processed element.
      Register len_reg    = Z_ARG3; // Holds length (in #elements) of arrays.
      Register dst_reg    = Z_ARG2; // Holds left  operand addr.
      Register src_reg    = Z_ARG1; // Holds right operand addr.

      assert(256 % element_size == 0, "Element size must be power of 2.");
      assert(element_size <= 8, "Can't handle more than DW units.");

      switch (element_size) {
        case 1:  BLOCK_COMMENT("ARRAYCOPY CONJOINT byte {");  break;
        case 2:  BLOCK_COMMENT("ARRAYCOPY CONJOINT short {"); break;
        case 4:  BLOCK_COMMENT("ARRAYCOPY CONJOINT int {");   break;
        case 8:  BLOCK_COMMENT("ARRAYCOPY CONJOINT long {");  break;
        default: BLOCK_COMMENT("ARRAYCOPY CONJOINT {");       break;
      }

      assert_positive_int(len_reg);

      if (VM_Version::has_Prefetch()) {
        __ z_pfd(0x01, 0, Z_R0, src_reg); // Fetch access.
        __ z_pfd(0x02, 0, Z_R0, dst_reg); // Store access.
      }

      unsigned int log2_size = exact_log2(element_size);
      if (log2_size) {
        __ z_sllg(ix_reg, len_reg, log2_size);
      } else {
        __ z_lgr(ix_reg, len_reg);
      }

      // Optimize reverse copy loop.
      // Main loop copies DW units which may be unaligned. Unaligned access adds some penalty ticks.
      // Unaligned DW access (neither fetch nor store) is DW-atomic, but should be alignment-atomic.
      // Preceding the main loop, some bytes are copied to obtain a DW-multiple remaining length.

      Label countLoop1;
      Label copyLoop1;
      Label skipBY;
      Label skipHW;
      int   stride = -8;

      __ load_const_optimized(stride_reg, stride); // Prepare for DW copy loop.

      if (element_size == 8)    // Nothing to do here.
        __ z_bru(countLoop1);
      else {                    // Do not generate dead code.
        __ z_tmll(ix_reg, 7);   // Check the "odd" bits.
        __ z_bre(countLoop1);   // There are none, very good!
      }

      if (log2_size == 0) {     // Handle leftover Byte.
        __ z_tmll(ix_reg, 1);
        __ z_bre(skipBY);
        __ z_lb(data_reg, -1, ix_reg, src_reg);
        __ z_stcy(data_reg, -1, ix_reg, dst_reg);
        __ add2reg(ix_reg, -1); // Decrement delayed to avoid AGI.
        __ bind(skipBY);
        // fallthru
      }
      if (log2_size <= 1) {     // Handle leftover HW.
        __ z_tmll(ix_reg, 2);
        __ z_bre(skipHW);
        __ z_lhy(data_reg, -2, ix_reg, src_reg);
        __ z_sthy(data_reg, -2, ix_reg, dst_reg);
        __ add2reg(ix_reg, -2); // Decrement delayed to avoid AGI.
        __ bind(skipHW);
        __ z_tmll(ix_reg, 4);
        __ z_bre(countLoop1);
        // fallthru
      }
      if (log2_size <= 2) {     // There are just 4 bytes (left) that need to be copied.
        __ z_ly(data_reg, -4, ix_reg, src_reg);
        __ z_sty(data_reg, -4, ix_reg, dst_reg);
        __ add2reg(ix_reg, -4); // Decrement delayed to avoid AGI.
        __ z_bru(countLoop1);
      }
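
      // (Worked example, byte array with ix == 13: one byte is peeled
      // (ix -> 12), the halfword test finds 12 & 2 == 0 and skips, the
      // word test finds 12 & 4 != 0 and peels four bytes (ix -> 8), and
      // the DW loop below moves the remaining eight bytes.)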

      // Control can never get to here. Never! Never ever!
      __ z_illtrap(0x99);
      __ bind(copyLoop1);
      __ z_lg(data_reg, 0, ix_reg, src_reg);
      __ z_stg(data_reg, 0, ix_reg, dst_reg);
      __ bind(countLoop1);
      __ z_brxhg(ix_reg, stride_reg, copyLoop1);

      if (!branchToEnd)
        __ z_br(Z_R14);

      switch (element_size) {
        case 1:  BLOCK_COMMENT("} ARRAYCOPY CONJOINT byte ");  break;
        case 2:  BLOCK_COMMENT("} ARRAYCOPY CONJOINT short"); break;
        case 4:  BLOCK_COMMENT("} ARRAYCOPY CONJOINT int ");   break;
        case 8:  BLOCK_COMMENT("} ARRAYCOPY CONJOINT long ");  break;
        default: BLOCK_COMMENT("} ARRAYCOPY CONJOINT ");       break;
      }
    }
  }

  // Generate stub for disjoint byte copy. If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  address generate_disjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);

    // This is the zarch specific stub generator for byte array copy.
    // Refer to generate_disjoint_copy for a list of prereqs and features:
    unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
    generate_disjoint_copy(aligned, 1, false, false);
    return __ addr_at(start_off);
  }


  address generate_disjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    // This is the zarch specific stub generator for short array copy.
    // Refer to generate_disjoint_copy for a list of prereqs and features:
    unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
    generate_disjoint_copy(aligned, 2, false, false);
    return __ addr_at(start_off);
  }


  address generate_disjoint_int_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    // This is the zarch specific stub generator for int array copy.
    // Refer to generate_disjoint_copy for a list of prereqs and features:
    unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
    generate_disjoint_copy(aligned, 4, false, false);
    return __ addr_at(start_off);
  }


  address generate_disjoint_long_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    // This is the zarch specific stub generator for long array copy.
    // Refer to generate_disjoint_copy for a list of prereqs and features:
    unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
    generate_disjoint_copy(aligned, 8, false, false);
    return __ addr_at(start_off);
  }


  address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
    StubCodeMark mark(this, "StubRoutines", name);
    // This is the zarch specific stub generator for oop array copy.
    // Refer to generate_disjoint_copy for a list of prereqs and features.
    unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
    unsigned int size      = UseCompressedOops ? 4 : 8;

    gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized);

    generate_disjoint_copy(aligned, size, true, true);

    gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false);

    return __ addr_at(start_off);
  }


  address generate_conjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    // This is the zarch specific stub generator for overlapping byte array copy.
    // Refer to generate_conjoint_copy for a list of prereqs and features:
    unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
    address nooverlap_target = aligned ? StubRoutines::arrayof_jbyte_disjoint_arraycopy()
                                       : StubRoutines::jbyte_disjoint_arraycopy();

    array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint.
    generate_conjoint_copy(aligned, 1, false);

    return __ addr_at(start_off);
  }


  address generate_conjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    // This is the zarch specific stub generator for overlapping short array copy.
    // Refer to generate_conjoint_copy for a list of prereqs and features:
    unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
    address nooverlap_target = aligned ? StubRoutines::arrayof_jshort_disjoint_arraycopy()
                                       : StubRoutines::jshort_disjoint_arraycopy();

    array_overlap_test(nooverlap_target, 1); // Branch away to nooverlap_target if disjoint.
    generate_conjoint_copy(aligned, 2, false);

    return __ addr_at(start_off);
  }

  address generate_conjoint_int_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    // This is the zarch specific stub generator for overlapping int array copy.
    // Refer to generate_conjoint_copy for a list of prereqs and features:

    unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
    address nooverlap_target = aligned ? StubRoutines::arrayof_jint_disjoint_arraycopy()
                                       : StubRoutines::jint_disjoint_arraycopy();

    array_overlap_test(nooverlap_target, 2); // Branch away to nooverlap_target if disjoint.
    generate_conjoint_copy(aligned, 4, false);

    return __ addr_at(start_off);
  }

  address generate_conjoint_long_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    // This is the zarch specific stub generator for overlapping long array copy.
    // Refer to generate_conjoint_copy for a list of prereqs and features:

    unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
    address nooverlap_target = aligned ? StubRoutines::arrayof_jlong_disjoint_arraycopy()
                                       : StubRoutines::jlong_disjoint_arraycopy();

    array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint.
    generate_conjoint_copy(aligned, 8, false);

    return __ addr_at(start_off);
  }

  address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
    StubCodeMark mark(this, "StubRoutines", name);
    // This is the zarch specific stub generator for overlapping oop array copy.
    // Refer to generate_conjoint_copy for a list of prereqs and features.
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).
    unsigned int size      = UseCompressedOops ? 4 : 8;
    unsigned int shift     = UseCompressedOops ? 2 : 3;

    address nooverlap_target = aligned ? StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized)
                                       : StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);

    // Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
    array_overlap_test(nooverlap_target, shift);  // Branch away to nooverlap_target if disjoint.

    gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized);

    generate_conjoint_copy(aligned, size, true);  // Must preserve ARG2, ARG3.

    gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false);

    return __ addr_at(start_off);
  }


  void generate_arraycopy_stubs() {

    // Note: the disjoint stubs must be generated first, as some of
    // the conjoint stubs use them.
    StubRoutines::_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy (false, "jbyte_disjoint_arraycopy");
    StubRoutines::_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
    StubRoutines::_jint_disjoint_arraycopy       = generate_disjoint_int_copy  (false, "jint_disjoint_arraycopy");
    StubRoutines::_jlong_disjoint_arraycopy      = generate_disjoint_long_copy (false, "jlong_disjoint_arraycopy");
    StubRoutines::_oop_disjoint_arraycopy        = generate_disjoint_oop_copy  (false, "oop_disjoint_arraycopy", false);
    StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy  (false, "oop_disjoint_arraycopy_uninit", true);

    StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy (true, "arrayof_jbyte_disjoint_arraycopy");
    StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
    StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy  (true, "arrayof_jint_disjoint_arraycopy");
    StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy (true, "arrayof_jlong_disjoint_arraycopy");
    StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy  (true, "arrayof_oop_disjoint_arraycopy", false);
    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy  (true, "arrayof_oop_disjoint_arraycopy_uninit", true);

    StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy (false, "jbyte_arraycopy");
    StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
    StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy  (false, "jint_arraycopy");
    StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy (false, "jlong_arraycopy");
    StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy  (false, "oop_arraycopy", false);
    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy  (false, "oop_arraycopy_uninit", true);

    StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy (true, "arrayof_jbyte_arraycopy");
(true, "arrayof_jbyte_arraycopy"); 1616 StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy"); 1617 StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy (true, "arrayof_jint_arraycopy"); 1618 StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy (true, "arrayof_jlong_arraycopy"); 1619 StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy", false); 1620 StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy_uninit", true); 1621 } 1622 1623 void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) { 1624 1625 // safefetch signatures: 1626 // int SafeFetch32(int* adr, int errValue); 1627 // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue); 1628 // 1629 // arguments: 1630 // Z_ARG1 = adr 1631 // Z_ARG2 = errValue 1632 // 1633 // result: 1634 // Z_RET = *adr or errValue 1635 1636 StubCodeMark mark(this, "StubRoutines", name); 1637 1638 // entry point 1639 // Load *adr into Z_ARG2, may fault. 1640 *entry = *fault_pc = __ pc(); 1641 switch (size) { 1642 case 4: 1643 // Sign extended int32_t. 1644 __ z_lgf(Z_ARG2, 0, Z_ARG1); 1645 break; 1646 case 8: 1647 // int64_t 1648 __ z_lg(Z_ARG2, 0, Z_ARG1); 1649 break; 1650 default: 1651 ShouldNotReachHere(); 1652 } 1653 1654 // Return errValue or *adr. 1655 *continuation_pc = __ pc(); 1656 __ z_lgr(Z_RET, Z_ARG2); 1657 __ z_br(Z_R14); 1658 1659 } 1660 1661 // Call interface for AES_encryptBlock, AES_decryptBlock stubs. 1662 // 1663 // Z_ARG1 - source data block. Ptr to leftmost byte to be processed. 1664 // Z_ARG2 - destination data block. Ptr to leftmost byte to be stored. 1665 // For in-place encryption/decryption, ARG1 and ARG2 can point 1666 // to the same piece of storage. 1667 // Z_ARG3 - Crypto key address (expanded key). The first n bits of 1668 // the expanded key constitute the original AES-<n> key (see below). 1669 // 1670 // Z_RET - return value. First unprocessed byte offset in src buffer. 1671 // 1672 // Some remarks: 1673 // The crypto key, as passed from the caller to these encryption stubs, 1674 // is a so-called expanded key. It is derived from the original key 1675 // by the Rijndael key schedule, see http://en.wikipedia.org/wiki/Rijndael_key_schedule 1676 // With the expanded key, the cipher/decipher task is decomposed in 1677 // multiple, less complex steps, called rounds. Sun SPARC and Intel 1678 // processors obviously implement support for those less complex steps. 1679 // z/Architecture provides instructions for full cipher/decipher complexity. 1680 // Therefore, we need the original, not the expanded key here. 1681 // Luckily, the first n bits of an AES-<n> expanded key are formed 1682 // by the original key itself. That takes us out of trouble. :-) 1683 // The key length (in bytes) relation is as follows: 1684 // original expanded rounds key bit keylen 1685 // key bytes key bytes length in words 1686 // 16 176 11 128 44 1687 // 24 208 13 192 52 1688 // 32 240 15 256 60 1689 // 1690 // The crypto instructions used in the AES* stubs have some specific register requirements. 1691 // Z_R0 holds the crypto function code. Please refer to the KM/KMC instruction 1692 // description in the "z/Architecture Principles of Operation" manual for details. 1693 // Z_R1 holds the parameter block address. 
  // Helper function which generates code to
  //  - load the function code into register fCode (== Z_R0),
  //  - load the data block length (depends on the cipher function) into register srclen.
  // is_decipher selects between the cipher and decipher function codes.
  void generate_load_AES_fCode(Register keylen, Register fCode, Register srclen, bool is_decipher) {

    BLOCK_COMMENT("Set fCode {"); {
      Label fCode_set;
      int   mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher;
      bool  identical_dataBlk_len = (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES192_dataBlk)
                                    && (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES256_dataBlk);
      // Expanded key length is 44/52/60 * 4 bytes for AES-128/AES-192/AES-256.
      __ z_cghi(keylen, 52);
      __ z_lghi(fCode, VM_Version::Cipher::_AES256 + mode);
      if (!identical_dataBlk_len) {
        __ z_lghi(srclen, VM_Version::Cipher::_AES256_dataBlk);
      }
      __ z_brh(fCode_set);  // keyLen >  52: AES256

      __ z_lghi(fCode, VM_Version::Cipher::_AES192 + mode);
      if (!identical_dataBlk_len) {
        __ z_lghi(srclen, VM_Version::Cipher::_AES192_dataBlk);
      }
      __ z_bre(fCode_set);  // keyLen == 52: AES192

      __ z_lghi(fCode, VM_Version::Cipher::_AES128 + mode);
      if (!identical_dataBlk_len) {
        __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk);
      }
      // __ z_brl(fCode_set);  // keyLen <  52: AES128  // fallthru
      __ bind(fCode_set);
      if (identical_dataBlk_len) {
        __ z_lghi(srclen, VM_Version::Cipher::_AES128_dataBlk);
      }
    }
    BLOCK_COMMENT("} Set fCode");
  }

  // Push a parameter block for the cipher/decipher instruction on the stack.
  // NOTE:
  //   Before returning, the stub has to copy the chaining value from
  //   the parmBlk, where it was updated by the crypto instruction, back
  //   to the chaining value array the address of which was passed in the cv argument.
  //   As all the available registers are used and modified by KMC, we need to save
  //   the key length across the KMC instruction. We do so by spilling it to the stack,
  //   just preceding the parmBlk (at (parmBlk - 8)).
  void generate_push_parmBlk(Register keylen, Register fCode, Register parmBlk, Register key, Register cv, bool is_decipher) {
    const int AES_parmBlk_align    = 32;
    const int AES_parmBlk_addspace = AES_parmBlk_align;  // Must be multiple of AES_parmBlk_align.
    int       cv_len, key_len;
    int       mode = is_decipher ? VM_Version::CipherMode::decipher : VM_Version::CipherMode::cipher;
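    // (Editorial note) keylen, as read from the key array header, is in
    // 4-byte words: 44 for AES-128, 52 for AES-192, 60 for AES-256. The
    // single compare against 52 below thus separates all three variants
    // (brl/bre/brh).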
    Label     parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set;

    BLOCK_COMMENT("push parmBlk {");
    if (VM_Version::has_Crypto_AES()   ) { __ z_cghi(keylen, 52); }
    if (VM_Version::has_Crypto_AES256()) { __ z_brh(parmBlk_256); }  // keyLen >  52: AES256
    if (VM_Version::has_Crypto_AES192()) { __ z_bre(parmBlk_192); }  // keyLen == 52: AES192
    if (VM_Version::has_Crypto_AES128()) { __ z_brl(parmBlk_128); }  // keyLen <  52: AES128

    // Security net: requested AES function not available on this CPU.
    // NOTE:
    //   As of now (March 2015), this safety net is not required. JCE policy files limit the
    //   cryptographic strength of the keys used to 128 bit. If we have AES hardware support
    //   at all, we have at least AES-128.
    __ stop_static("AES key strength not supported by CPU. Use -XX:-UseAES as remedy.", 0);

    if (VM_Version::has_Crypto_AES128()) {
      __ bind(parmBlk_128);
      cv_len  = VM_Version::Cipher::_AES128_dataBlk;
      key_len = VM_Version::Cipher::_AES128_parmBlk_C - cv_len;
      __ z_lay(parmBlk, -(VM_Version::Cipher::_AES128_parmBlk_C+AES_parmBlk_align)+(AES_parmBlk_align-1), Z_SP);
      __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff);  // Align parameter block.

      // Resize the frame to accommodate the aligned parameter block and other stuff.
      // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace, parmBlk).
      __ z_stg(keylen, -8, parmBlk);                   // Spill keylen for later use.
      __ z_stg(Z_SP,  -16, parmBlk);                   // Spill SP for easy revert.
      __ z_aghi(parmBlk, -AES_parmBlk_addspace);       // Additional space for keylen, etc.
      __ resize_frame_absolute(parmBlk, keylen, true); // Resize frame with parmBlk being the new SP.
      __ z_aghi(parmBlk, AES_parmBlk_addspace);        // Restore parameter block address.

      __ z_mvc(0, cv_len-1, parmBlk, 0, cv);           // Copy cv.
      __ z_mvc(cv_len, key_len-1, parmBlk, 0, key);    // Copy key.
      __ z_lghi(fCode, VM_Version::Cipher::_AES128 + mode);
      if (VM_Version::has_Crypto_AES192() || VM_Version::has_Crypto_AES256()) {
        __ z_bru(parmBlk_set);  // Fallthru otherwise.
      }
    }

    if (VM_Version::has_Crypto_AES192()) {
      __ bind(parmBlk_192);
      cv_len  = VM_Version::Cipher::_AES192_dataBlk;
      key_len = VM_Version::Cipher::_AES192_parmBlk_C - cv_len;
      __ z_lay(parmBlk, -(VM_Version::Cipher::_AES192_parmBlk_C+AES_parmBlk_align)+(AES_parmBlk_align-1), Z_SP);
      __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff);  // Align parameter block.

      // Resize the frame to accommodate the aligned parameter block and other stuff.
      // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace, parmBlk).
      __ z_stg(keylen, -8, parmBlk);                   // Spill keylen for later use.
      __ z_stg(Z_SP,  -16, parmBlk);                   // Spill SP for easy revert.
      __ z_aghi(parmBlk, -AES_parmBlk_addspace);       // Additional space for keylen, etc.
      __ resize_frame_absolute(parmBlk, keylen, true); // Resize frame with parmBlk being the new SP.
      __ z_aghi(parmBlk, AES_parmBlk_addspace);        // Restore parameter block address.

      __ z_mvc(0, cv_len-1, parmBlk, 0, cv);           // Copy cv.
      __ z_mvc(cv_len, key_len-1, parmBlk, 0, key);    // Copy key.
      __ z_lghi(fCode, VM_Version::Cipher::_AES192 + mode);
      if (VM_Version::has_Crypto_AES256()) {
        __ z_bru(parmBlk_set);  // Fallthru otherwise.
      }
    }
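    // (Editorial sketch) Stack layout around the parameter block, identical
    // for all key sizes:
    //   [parmBlk - 16]      saved Z_SP, used to revert the frame resize,
    //   [parmBlk -  8]      spilled keylen, surviving the KMC register usage,
    //   [parmBlk +  0]      chaining value (cv_len bytes),
    //   [parmBlk + cv_len]  crypto key (key_len bytes).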
    if (VM_Version::has_Crypto_AES256()) {
      __ bind(parmBlk_256);
      cv_len  = VM_Version::Cipher::_AES256_dataBlk;
      key_len = VM_Version::Cipher::_AES256_parmBlk_C - cv_len;
      __ z_lay(parmBlk, -(VM_Version::Cipher::_AES256_parmBlk_C+AES_parmBlk_align)+(AES_parmBlk_align-1), Z_SP);
      __ z_nill(parmBlk, (~(AES_parmBlk_align-1)) & 0xffff);  // Align parameter block.

      // Resize the frame to accommodate the aligned parameter block and other stuff.
      // There is room for stuff in the range [parmBlk-AES_parmBlk_addspace, parmBlk).
      __ z_stg(keylen, -8, parmBlk);                   // Spill keylen for later use.
      __ z_stg(Z_SP,  -16, parmBlk);                   // Spill SP for easy revert.
      __ z_aghi(parmBlk, -AES_parmBlk_addspace);       // Additional space for keylen, etc.
      __ resize_frame_absolute(parmBlk, keylen, true); // Resize frame with parmBlk being the new SP.
      __ z_aghi(parmBlk, AES_parmBlk_addspace);        // Restore parameter block address.

      __ z_mvc(0, cv_len-1, parmBlk, 0, cv);           // Copy cv.
      __ z_mvc(cv_len, key_len-1, parmBlk, 0, key);    // Copy key.
      __ z_lghi(fCode, VM_Version::Cipher::_AES256 + mode);
      // __ z_bru(parmBlk_set);  // fallthru
    }

    __ bind(parmBlk_set);
    BLOCK_COMMENT("} push parmBlk");
  }

  // Pop a parameter block from the stack. The chaining value portion of the parameter block
  // is copied back to the cv array, as it is needed for subsequent cipher steps.
  // The keylen value as well as the original SP (before resizing) were pushed to the stack
  // when pushing the parameter block.
  void generate_pop_parmBlk(Register keylen, Register parmBlk, Register key, Register cv) {

    BLOCK_COMMENT("pop parmBlk {");
    bool identical_dataBlk_len = (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES192_dataBlk) &&
                                 (VM_Version::Cipher::_AES128_dataBlk == VM_Version::Cipher::_AES256_dataBlk);
    if (identical_dataBlk_len) {
      int cv_len = VM_Version::Cipher::_AES128_dataBlk;
      __ z_mvc(0, cv_len-1, cv, 0, parmBlk);  // Copy cv.
    } else {
      int cv_len;
      Label parmBlk_128, parmBlk_192, parmBlk_256, parmBlk_set;
      __ z_lg(keylen, -8, parmBlk);  // Restore keylen.
      __ z_cghi(keylen, 52);
      if (VM_Version::has_Crypto_AES256()) __ z_brh(parmBlk_256);  // keyLen >  52: AES256
      if (VM_Version::has_Crypto_AES192()) __ z_bre(parmBlk_192);  // keyLen == 52: AES192
      // if (VM_Version::has_Crypto_AES128()) __ z_brl(parmBlk_128);  // keyLen <  52: AES128  // fallthru

      // Security net: there is none here. Had we needed one, we would already
      // have run into it when pushing the parameter block.
      if (VM_Version::has_Crypto_AES128()) {
        __ bind(parmBlk_128);
        cv_len = VM_Version::Cipher::_AES128_dataBlk;
        __ z_mvc(0, cv_len-1, cv, 0, parmBlk);  // Copy cv.
        if (VM_Version::has_Crypto_AES192() || VM_Version::has_Crypto_AES256()) {
          __ z_bru(parmBlk_set);
        }
      }

      if (VM_Version::has_Crypto_AES192()) {
        __ bind(parmBlk_192);
        cv_len = VM_Version::Cipher::_AES192_dataBlk;
        __ z_mvc(0, cv_len-1, cv, 0, parmBlk);  // Copy cv.
        if (VM_Version::has_Crypto_AES256()) {
          __ z_bru(parmBlk_set);
        }
      }

      if (VM_Version::has_Crypto_AES256()) {
        __ bind(parmBlk_256);
        cv_len = VM_Version::Cipher::_AES256_dataBlk;
        __ z_mvc(0, cv_len-1, cv, 0, parmBlk);  // Copy cv.
        // __ z_bru(parmBlk_set);  // fallthru
      }
      __ bind(parmBlk_set);
    }
    __ z_lg(Z_SP, -16, parmBlk);  // Revert resize_frame_absolute.
    BLOCK_COMMENT("} pop parmBlk");
  }

  // Compute AES encrypt function.
  address generate_AES_encryptBlock(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    Register from = Z_ARG1;  // source byte array
    Register to   = Z_ARG2;  // destination byte array
    Register key  = Z_ARG3;  // expanded key array

    const Register keylen  = Z_R0;    // Temporarily (until fCode is set) holds the expanded key array length.
    const Register fCode   = Z_R0;    // crypto function code
    const Register parmBlk = Z_R1;    // parameter block address (points to crypto key)
    const Register src     = Z_ARG1;  // is Z_R2
    const Register srclen  = Z_ARG2;  // Overwrites destination address.
    const Register dst     = Z_ARG3;  // Overwrites expanded key address.

    // Read key len of expanded key (in 4-byte words).
    __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    // Copy arguments to registers as required by crypto instruction.
    __ z_lgr(parmBlk, key);  // crypto key (in T_INT array).
    // __ z_lgr(src, from);  // Copy not needed, src/from are identical.
    __ z_lgr(dst, to);       // Copy destination address to even register.

    // Construct function code in Z_R0, data block length in Z_ARG2.
    generate_load_AES_fCode(keylen, fCode, srclen, false);

    __ km(dst, src);  // Cipher the message.

    __ z_br(Z_R14);

    return __ addr_at(start_off);
  }

  // Compute AES decrypt function.
  address generate_AES_decryptBlock(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    Register from = Z_ARG1;  // source byte array
    Register to   = Z_ARG2;  // destination byte array
    Register key  = Z_ARG3;  // expanded key array, not preset at entry!!!

    const Register keylen  = Z_R0;    // Temporarily (until fCode is set) holds the expanded key array length.
    const Register fCode   = Z_R0;    // crypto function code
    const Register parmBlk = Z_R1;    // parameter block address (points to crypto key)
    const Register src     = Z_ARG1;  // is Z_R2
    const Register srclen  = Z_ARG2;  // Overwrites destination address.
    const Register dst     = Z_ARG3;  // Overwrites key address.

    // Read key len of expanded key (in 4-byte words).
    __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    // Copy arguments to registers as required by crypto instruction.
    __ z_lgr(parmBlk, key);  // Copy crypto key address.
    // __ z_lgr(src, from);  // Copy not needed, src/from are identical.
    __ z_lgr(dst, to);       // Copy destination address to even register.

    // Construct function code in Z_R0, data block length in Z_ARG2.
    generate_load_AES_fCode(keylen, fCode, srclen, true);

    __ km(dst, src);  // Decipher the message.
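    // (Editorial note, hedged) Neither block stub sets Z_RET explicitly.
    // They appear to rely on KM updating its second-operand (source)
    // register, Z_R2 == Z_RET, so that on return it points past the
    // processed bytes, matching the documented return value.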

    __ z_br(Z_R14);

    return __ addr_at(start_off);
  }

  // These stubs receive the addresses of the cryptographic key and of the chaining value as two separate
  // arguments (registers "key" and "cv", respectively). The KMC instruction, on the other hand, requires
  // chaining value and key to be, in this sequence, adjacent in storage. Thus, we need to allocate some
  // thread-local working storage. Using heap memory incurs all the hassles of allocating/freeing.
  // Stack space, on the contrary, is deallocated automatically when we return from the stub to the caller.
  // *** WARNING ***
  // Please note that we do not formally allocate stack space, nor do we
  // update the stack pointer. Therefore, no function calls are allowed
  // and nobody else must use the stack range where the parameter block
  // is located.
  // We align the parameter block to the next available octoword.
  //
  // Compute chained AES encrypt function.
  address generate_cipherBlockChaining_AES_encrypt(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    Register       from   = Z_ARG1;  // source byte array (clear text)
    Register       to     = Z_ARG2;  // destination byte array (ciphered)
    Register       key    = Z_ARG3;  // expanded key array
    Register       cv     = Z_ARG4;  // chaining value
    const Register msglen = Z_ARG5;  // Total length of the msg to be encrypted. Value must be returned
                                     // in Z_RET upon completion of this stub. Is a 32-bit integer.

    const Register keylen  = Z_R0;    // Expanded key length, as read from key array. Temp only.
    const Register fCode   = Z_R0;    // crypto function code
    const Register parmBlk = Z_R1;    // parameter block address (points to crypto key)
    const Register src     = Z_ARG1;  // is Z_R2
    const Register srclen  = Z_ARG2;  // Overwrites destination address.
    const Register dst     = Z_ARG3;  // Overwrites key address.

    // Read key len of expanded key (in 4-byte words).
    __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    // Construct parm block address in parmBlk (== Z_R1), copy cv and key to parm block.
    // Construct function code in Z_R0.
    generate_push_parmBlk(keylen, fCode, parmBlk, key, cv, false);

    // Prepare other registers for instruction.
    // __ z_lgr(src, from);  // Not needed, registers are the same.
    __ z_lgr(dst, to);
    __ z_llgfr(srclen, msglen);  // msglen was passed as int; zero-extend to the 64 bits used here.

    __ kmc(dst, src);  // Cipher the message.

    generate_pop_parmBlk(keylen, parmBlk, key, cv);

    __ z_llgfr(Z_RET, msglen);  // Return msglen; passed as int, zero-extended.
    __ z_br(Z_R14);

    return __ addr_at(start_off);
  }

  // Compute chained AES decrypt function.
  address generate_cipherBlockChaining_AES_decrypt(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    Register from = Z_ARG1;  // source byte array (ciphered)
    Register to   = Z_ARG2;  // destination byte array (clear text)
    Register key  = Z_ARG3;  // expanded key array, not preset at entry!!!
    Register cv   = Z_ARG4;  // chaining value
    const Register msglen = Z_ARG5;  // Total length of the msg to be decrypted. Value must be returned
                                     // in Z_RET upon completion of this stub.

    const Register keylen  = Z_R0;    // Expanded key length, as read from key array. Temp only.
    const Register fCode   = Z_R0;    // crypto function code
    const Register parmBlk = Z_R1;    // parameter block address (points to crypto key)
    const Register src     = Z_ARG1;  // is Z_R2
    const Register srclen  = Z_ARG2;  // Overwrites destination address.
    const Register dst     = Z_ARG3;  // Overwrites key address.

    // Read key len of expanded key (in 4-byte words).
    __ z_lgf(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));

    // Construct parm block address in parmBlk (== Z_R1), copy cv and key to parm block.
    // Construct function code in Z_R0.
    generate_push_parmBlk(keylen, fCode, parmBlk, key, cv, true);

    // Prepare other registers for instruction.
    // __ z_lgr(src, from);  // Not needed, registers are the same.
    __ z_lgr(dst, to);
    __ z_lgr(srclen, msglen);

    __ kmc(dst, src);  // Decipher the message.

    generate_pop_parmBlk(keylen, parmBlk, key, cv);

    __ z_lgr(Z_RET, msglen);
    __ z_br(Z_R14);

    return __ addr_at(start_off);
  }


  // Call interface for all SHA* stubs.
  //
  //   Z_ARG1 - source data block. Ptr to leftmost byte to be processed.
  //   Z_ARG2 - current SHA state. Ptr to state area. This area serves as
  //            parameter block as required by the crypto instruction.
  //   Z_ARG3 - current byte offset in source data block.
  //   Z_ARG4 - last byte offset in source data block.
  //            (Z_ARG4 - Z_ARG3) gives the #bytes remaining to be processed.
  //
  //   Z_RET  - return value. First unprocessed byte offset in src buffer.
  //
  // A few notes on the call interface:
  //  - All stubs, whether they are single-block or multi-block, are assumed to
  //    digest an integer multiple of the data block length of data. All data
  //    blocks are digested using the intermediate message digest (KIMD) instruction.
  //    Special end processing, as done by the KLMD instruction, seems to be
  //    emulated by the calling code.
  //
  //  - Z_ARG1 addresses the first byte of source data. The offset (Z_ARG3) is
  //    already accounted for.
  //
  //  - The current SHA state (the intermediate message digest value) is contained
  //    in an area addressed by Z_ARG2. The area size depends on the SHA variant
  //    and is accessible via the enum VM_Version::MsgDigest::_SHA<n>_parmBlk_I.
  //
  //  - The single-block stub is expected to digest exactly one data block, starting
  //    at the address passed in Z_ARG1.
  //
  //  - The multi-block stub is expected to digest all data blocks which start in
  //    the offset interval [srcOff(Z_ARG3), srcLimit(Z_ARG4)). The exact difference
  //    (srcLimit-srcOff), rounded up to the next multiple of the data block length,
  //    gives the number of blocks to digest. It must be assumed that the calling code
  //    provides for a large enough source data buffer.
  //
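  // (Editorial summary) For the multi-block stubs this amounts to
  //   #blocks = ceil((srcLimit - srcOff) / dataBlk)
  // with Z_RET = srcOff + #blocks * dataBlk on return.
  //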
  // Compute SHA-1 function.
  address generate_SHA1_stub(bool multiBlock, const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    const Register srcBuff        = Z_ARG1;  // Points to first block to process (offset already added).
    const Register SHAState       = Z_ARG2;  // Only on entry. Reused soon thereafter for kimd register pairs.
    const Register srcOff         = Z_ARG3;  // int
    const Register srcLimit       = Z_ARG4;  // Only passed in multiBlock case. int

    const Register SHAState_local = Z_R1;
    const Register SHAState_save  = Z_ARG3;
    const Register srcBufLen      = Z_ARG2;  // Destroys state address; must be copied before use.
    Label useKLMD, rtn;

    __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA1);  // function code
    __ z_lgr(SHAState_local, SHAState);  // SHAState == parameter block

    if (multiBlock) {  // Process everything from offset to limit.

      // The following description is valid if we get a raw (untreated) source data buffer,
      // spanning the range [srcOff(Z_ARG3), srcLimit(Z_ARG4)). As detailed above,
      // the calling convention for these stubs is different. We leave the description in
      // to inform the reader what must be happening hidden in the calling code.
      //
      // The data block to be processed can have arbitrary length, i.e. its length does not
      // need to be an integer multiple of SHA<n>_dataBlk. Therefore, we need to implement
      // two different paths. If the length is an integer multiple, we use KIMD, saving us
      // from copying the SHA state back and forth. If it is not, we copy the SHA state
      // to the stack, execute a KLMD instruction on it, and copy the result back to the
      // caller's SHA state location.

      // Total #srcBuff blocks to process.
      if (VM_Version::has_DistinctOpnds()) {
        __ z_srk(srcBufLen, srcLimit, srcOff);                         // exact difference
        __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1);   // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA1_dataBlk-1)) & 0xffff);
        __ z_ark(srcLimit, srcOff, srcBufLen);  // srcLimit temporarily holds return value.
        __ z_llgfr(srcBufLen, srcBufLen);       // Cast to 64-bit.
      } else {
        __ z_lgfr(srcBufLen, srcLimit);         // Exact difference. srcLimit passed as int.
        __ z_sgfr(srcBufLen, srcOff);           // srcOff passed as int, now properly cast to long.
        __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1);  // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA1_dataBlk-1)) & 0xffff);
        __ z_lgr(srcLimit, srcOff);             // srcLimit temporarily holds return value.
        __ z_agr(srcLimit, srcBufLen);
      }
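      // (Editorial worked example, assuming _SHA1_dataBlk == 64):
      // for srcLimit - srcOff = 150, srcBufLen = (150 + 63) & ~63 = 192,
      // i.e. three full blocks; srcLimit = srcOff + 192 then is the offset
      // of the first unprocessed byte, the stub's return value.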
      // Integral #blocks to digest?
      // As a result of the calculations above, srcBufLen MUST be an integer
      // multiple of _SHA1_dataBlk, or else we are in big trouble.
      // We insert an asm_assert into the KLMD case to guard against that.
      __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA1_dataBlk-1);
      __ z_brc(Assembler::bcondNotAllZero, useKLMD);

      // Process all full blocks.
      __ kimd(srcBuff);

      __ z_lgr(Z_RET, srcLimit);  // Offset of first unprocessed byte in buffer.
    } else {  // Process one data block only.
      __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA1_dataBlk);  // #srcBuff bytes to process
      __ kimd(srcBuff);
      __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA1_dataBlk, srcOff);  // Offset of first unprocessed byte in buffer. No 32 to 64 bit extension needed.
    }

    __ bind(rtn);
    __ z_br(Z_R14);

    if (multiBlock) {
      __ bind(useKLMD);

#if 1
      // Security net: this stub is believed to be called for full-sized data blocks only.
      // NOTE: The following code is believed to be correct, but it is not tested.
      __ stop_static("SHA1 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0);
#endif
    }

    return __ addr_at(start_off);
  }

  // Compute SHA-256 function.
  address generate_SHA256_stub(bool multiBlock, const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    const Register srcBuff        = Z_ARG1;
    const Register SHAState       = Z_ARG2;  // Only on entry. Reused soon thereafter.
    const Register SHAState_local = Z_R1;
    const Register SHAState_save  = Z_ARG3;
    const Register srcOff         = Z_ARG3;
    const Register srcLimit       = Z_ARG4;
    const Register srcBufLen      = Z_ARG2;  // Destroys state address; must be copied before use.
    Label useKLMD, rtn;

    __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA256);  // function code
    __ z_lgr(SHAState_local, SHAState);  // SHAState == parameter block

    if (multiBlock) {  // Process everything from offset to limit.
      // See generate_SHA1_stub for a description of the multi-block path.

      // total #srcBuff blocks to process
      if (VM_Version::has_DistinctOpnds()) {
        __ z_srk(srcBufLen, srcLimit, srcOff);                           // exact difference
        __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1);   // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA256_dataBlk-1)) & 0xffff);
        __ z_ark(srcLimit, srcOff, srcBufLen);  // srcLimit temporarily holds return value.
        __ z_llgfr(srcBufLen, srcBufLen);       // Cast to 64-bit.
      } else {
        __ z_lgfr(srcBufLen, srcLimit);         // exact difference
        __ z_sgfr(srcBufLen, srcOff);
        __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1);  // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA256_dataBlk-1)) & 0xffff);
        __ z_lgr(srcLimit, srcOff);             // srcLimit temporarily holds return value.
        __ z_agr(srcLimit, srcBufLen);
      }

      // Integral #blocks to digest?
      // As a result of the calculations above, srcBufLen MUST be an integer
      // multiple of _SHA256_dataBlk, or else we are in big trouble.
      // We insert an asm_assert into the KLMD case to guard against that.
      __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA256_dataBlk-1);
      __ z_brc(Assembler::bcondNotAllZero, useKLMD);

      // Process all full blocks.
      __ kimd(srcBuff);

      __ z_lgr(Z_RET, srcLimit);  // Offset of first unprocessed byte in buffer.
    } else {  // Process one data block only.
      __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA256_dataBlk);  // #srcBuff bytes to process
      __ kimd(srcBuff);
      __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA256_dataBlk, srcOff);  // Offset of first unprocessed byte in buffer.
    }

    __ bind(rtn);
    __ z_br(Z_R14);

    if (multiBlock) {
      __ bind(useKLMD);
#if 1
      // Security net: this stub is believed to be called for full-sized data blocks only.
      // NOTE: The following code is believed to be correct, but it is not tested.
      __ stop_static("SHA256 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0);
#endif
    }

    return __ addr_at(start_off);
  }

  // Compute SHA-512 function.
  address generate_SHA512_stub(bool multiBlock, const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    const Register srcBuff        = Z_ARG1;
    const Register SHAState       = Z_ARG2;  // Only on entry. Reused soon thereafter.
    const Register SHAState_local = Z_R1;
    const Register SHAState_save  = Z_ARG3;
    const Register srcOff         = Z_ARG3;
    const Register srcLimit       = Z_ARG4;
    const Register srcBufLen      = Z_ARG2;  // Destroys state address; must be copied before use.
    Label useKLMD, rtn;

    __ load_const_optimized(Z_R0, (int)VM_Version::MsgDigest::_SHA512);  // function code
    __ z_lgr(SHAState_local, SHAState);  // SHAState == parameter block

    if (multiBlock) {  // Process everything from offset to limit.
      // See generate_SHA1_stub for a description of the multi-block path.

      // total #srcBuff blocks to process
      if (VM_Version::has_DistinctOpnds()) {
        __ z_srk(srcBufLen, srcLimit, srcOff);                           // exact difference
        __ z_ahi(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1);   // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA512_dataBlk-1)) & 0xffff);
        __ z_ark(srcLimit, srcOff, srcBufLen);  // srcLimit temporarily holds return value.
        __ z_llgfr(srcBufLen, srcBufLen);       // Cast to 64-bit.
      } else {
        __ z_lgfr(srcBufLen, srcLimit);         // exact difference
        __ z_sgfr(srcBufLen, srcOff);
        __ z_aghi(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1);  // round up
        __ z_nill(srcBufLen, (~(VM_Version::MsgDigest::_SHA512_dataBlk-1)) & 0xffff);
        __ z_lgr(srcLimit, srcOff);             // srcLimit temporarily holds return value.
        __ z_agr(srcLimit, srcBufLen);
      }

      // Integral #blocks to digest?
      // As a result of the calculations above, srcBufLen MUST be an integer
      // multiple of _SHA512_dataBlk, or else we are in big trouble.
      // We insert an asm_assert into the KLMD case to guard against that.
      __ z_tmll(srcBufLen, VM_Version::MsgDigest::_SHA512_dataBlk-1);
      __ z_brc(Assembler::bcondNotAllZero, useKLMD);

      // Process all full blocks.
      __ kimd(srcBuff);

      __ z_lgr(Z_RET, srcLimit);  // Offset of first unprocessed byte in buffer.
    } else {  // Process one data block only.
      __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA512_dataBlk);  // #srcBuff bytes to process
      __ kimd(srcBuff);
      __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA512_dataBlk, srcOff);  // Offset of first unprocessed byte in buffer.
    }

    __ bind(rtn);
    __ z_br(Z_R14);

    if (multiBlock) {
      __ bind(useKLMD);
#if 1
      // Security net: this stub is believed to be called for full-sized data blocks only.
      // NOTE: The following code is believed to be correct, but it is not tested.
      __ stop_static("SHA512 stub can digest full data blocks only. Use -XX:-UseSHA as remedy.", 0);
#endif
    }

    return __ addr_at(start_off);
  }



  // Arguments:
  //   Z_ARG1 - int   crc
  //   Z_ARG2 - byte* buf
  //   Z_ARG3 - int   length (of buffer)
  //
  // Result:
  //   Z_RET  - int   crc result
  //
  // Compute CRC32 function.
  address generate_CRC32_updateBytes(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    unsigned int start_off = __ offset();  // Remember stub start address (is rtn value).

    // Arguments to kernel_crc32:
    Register       crc     = Z_ARG1;  // Current checksum, preset by caller or result from previous call, int.
    Register       data    = Z_ARG2;  // source byte array
    Register       dataLen = Z_ARG3;  // #bytes to process, int
    Register       table   = Z_ARG4;  // crc table address
    const Register t0      = Z_R10;   // work reg for kernel* emitters
    const Register t1      = Z_R11;   // work reg for kernel* emitters
    const Register t2      = Z_R12;   // work reg for kernel* emitters
    const Register t3      = Z_R13;   // work reg for kernel* emitters

    assert_different_registers(crc, data, dataLen, table);

    // dataLen is passed as int (per the Java-side signature); zero-extend it
    // for use as a 64-bit length. crc is used as int.
    __ z_llgfr(dataLen, dataLen);

    StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);

    __ resize_frame(-(6*8), Z_R0, true);  // Resize frame to provide additional spill space.
    __ z_stmg(Z_R10, Z_R13, 1*8, Z_SP);   // Spill regs 10..13 to make them available as work registers.
    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3);
    __ z_lmg(Z_R10, Z_R13, 1*8, Z_SP);    // Restore regs 10..13 from stack.
    __ resize_frame(+(6*8), Z_R0, true);  // Remove the spill space again.

    __ z_llgfr(Z_RET, crc);  // Updated crc is function result. No copying required, just zero upper 32 bits.
    __ z_br(Z_R14);          // Result already in Z_RET == Z_ARG1.

    return __ addr_at(start_off);
  }
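  // (Editorial note, hedged) The CRC32 stub above backs the
  // java.util.zip.CRC32.updateBytes intrinsic when UseCRC32Intrinsics is
  // active (it is wired up in generate_initial below); callers pass crc,
  // buffer address, and length, and read the updated crc from Z_RET.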

  // Arguments:
  //   Z_ARG1    - x address
  //   Z_ARG2    - x length
  //   Z_ARG3    - y address
  //   Z_ARG4    - y length
  //   Z_ARG5    - z address
  //   160[Z_SP] - z length
  address generate_multiplyToLen() {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ pc();

    const Register x    = Z_ARG1;
    const Register xlen = Z_ARG2;
    const Register y    = Z_ARG3;
    const Register ylen = Z_ARG4;
    const Register z    = Z_ARG5;
    // zlen is passed on the stack:
    // Address zlen(Z_SP, _z_abi(remaining_cargs));

    // Next registers will be saved on stack in multiply_to_len().
    const Register tmp1 = Z_tmp_1;
    const Register tmp2 = Z_tmp_2;
    const Register tmp3 = Z_tmp_3;
    const Register tmp4 = Z_tmp_4;
    const Register tmp5 = Z_R9;

    BLOCK_COMMENT("Entry:");

    __ z_llgfr(xlen, xlen);
    __ z_llgfr(ylen, ylen);

    __ multiply_to_len(x, xlen, y, ylen, z, tmp1, tmp2, tmp3, tmp4, tmp5);

    __ z_br(Z_R14);  // Return to caller.

    return start;
  }

  void generate_initial() {
    // Generates all stubs and initializes the entry points.

    // Entry points that exist in all platforms.
    // Note: This is code that could be shared among different
    // platforms - however the benefit seems to be smaller than the
    // disadvantage of having a much more complicated generator
    // structure. See also comment in stubRoutines.hpp.
    StubRoutines::_forward_exception_entry = generate_forward_exception();

    StubRoutines::_call_stub_entry       = generate_call_stub(StubRoutines::_call_stub_return_address);
    StubRoutines::_catch_exception_entry = generate_catch_exception();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);

    //----------------------------------------------------------------------
    // Entry points that are platform specific.

    if (UseCRC32Intrinsics) {
      // We have no CRC32 table on z/Architecture.
      StubRoutines::_crc_table_adr    = (address)StubRoutines::zarch::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
    }

    // Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
    StubRoutines::zarch::_trot_table_addr = (address)StubRoutines::zarch::_trot_table;
  }


  void generate_all() {
    // Generates all stubs and initializes the entry points.

    StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check();

    // These entry points require SharedInfo::stack0 to be set up in non-core builds.
    StubRoutines::_throw_AbstractMethodError_entry =
      generate_throw_exception("AbstractMethodError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
    StubRoutines::_throw_IncompatibleClassChangeError_entry =
      generate_throw_exception("IncompatibleClassChangeError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
    StubRoutines::_throw_NullPointerException_at_call_entry =
      generate_throw_exception("NullPointerException at call throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

    StubRoutines::zarch::_handler_for_unsafe_access_entry = generate_handler_for_unsafe_access();

    // Support for verify_oop (must happen after universe_init).
    StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();

    // Arraycopy stubs used by compilers.
    generate_arraycopy_stubs();

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),      &StubRoutines::_safefetch32_entry,
                       &StubRoutines::_safefetch32_fault_pc, &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN",  sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                       &StubRoutines::_safefetchN_fault_pc,  &StubRoutines::_safefetchN_continuation_pc);

    // Generate AES intrinsics code.
    if (UseAESIntrinsics) {
      StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock("AES_encryptBlock");
      StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock("AES_decryptBlock");
      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt("AES_encryptBlock_chaining");
      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt("AES_decryptBlock_chaining");
    }

    // Generate SHA1/SHA256/SHA512 intrinsics code.
    if (UseSHA1Intrinsics) {
      StubRoutines::_sha1_implCompress   = generate_SHA1_stub(false, "SHA1_singleBlock");
      StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(true,  "SHA1_multiBlock");
    }
    if (UseSHA256Intrinsics) {
      StubRoutines::_sha256_implCompress   = generate_SHA256_stub(false, "SHA256_singleBlock");
      StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(true,  "SHA256_multiBlock");
    }
    if (UseSHA512Intrinsics) {
      StubRoutines::_sha512_implCompress   = generate_SHA512_stub(false, "SHA512_singleBlock");
      StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(true,  "SHA512_multiBlock");
    }

#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }
#endif
  }

 public:
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    // Replace the standard masm with a special one.
    _masm = new MacroAssembler(code);

    _stub_count = !all ? 0x100 : 0x200;
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }

 private:
  int _stub_count;
  void stub_prolog(StubCodeDesc* cdesc) {
#ifdef ASSERT
    // Put extra information in the stub code, to make it more readable.
    // Write the high part of the address.
    // [RGV] Check if there is a dependency on the size of this prolog.
    __ emit_32((intptr_t)cdesc >> 32);
    __ emit_32((intptr_t)cdesc);
    __ emit_32(++_stub_count);
#endif
    align(true);
  }

  void align(bool at_header = false) {
    // z/Architecture cache line size is 256 bytes.
    // There is no obvious benefit in aligning stub
    // code to cache lines. Use CodeEntryAlignment instead.
    const unsigned int icache_line_size      = CodeEntryAlignment;
    const unsigned int icache_half_line_size = MIN2<unsigned int>(32, CodeEntryAlignment);

    if (at_header) {
      while ((intptr_t)(__ pc()) % icache_line_size != 0) {
        __ emit_16(0);
      }
    } else {
      while ((intptr_t)(__ pc()) % icache_half_line_size != 0) {
        __ z_nop();
      }
    }
  }

};

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}